From 0793eb926933034997cc2383adc414d080643e77 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 20 Sep 2023 23:16:01 -0400 Subject: [PATCH 001/420] Only clear clipboard when copying nodes. --- web/scripts/app.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index f0bb8640ce4..5efe08c0055 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -753,8 +753,9 @@ export class ComfyApp { // Default system copy return; } + // copy nodes and clear clipboard - if (this.canvas.selected_nodes) { + if (e.target.className === "litegraph" && this.canvas.selected_nodes) { this.canvas.copyToClipboard(); e.clipboardData.setData('text', ' '); //clearData doesn't remove images from clipboard e.preventDefault(); From 492db2de8db7e082addf131b40adb4a1b7535821 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 21 Sep 2023 01:14:42 -0400 Subject: [PATCH 002/420] Allow having a different pooled output for each image in a batch. --- comfy/model_base.py | 4 ++-- comfy/samplers.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index ca154dba040..ed2dc83e4e0 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -181,7 +181,7 @@ def encode_adm(self, **kwargs): out.append(self.embedder(torch.Tensor([crop_h]))) out.append(self.embedder(torch.Tensor([crop_w]))) out.append(self.embedder(torch.Tensor([aesthetic_score]))) - flat = torch.flatten(torch.cat(out))[None, ] + flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1) return torch.cat((clip_pooled.to(flat.device), flat), dim=1) class SDXL(BaseModel): @@ -206,5 +206,5 @@ def encode_adm(self, **kwargs): out.append(self.embedder(torch.Tensor([crop_w]))) out.append(self.embedder(torch.Tensor([target_height]))) out.append(self.embedder(torch.Tensor([target_width]))) - flat = torch.flatten(torch.cat(out))[None, ] + flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1) return torch.cat((clip_pooled.to(flat.device), flat), dim=1) diff --git a/comfy/samplers.py b/comfy/samplers.py index 57673a02957..e3192ca58f4 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -7,6 +7,7 @@ from .ldm.modules.diffusionmodules.util import make_ddim_timesteps import math from comfy import model_base +import comfy.utils def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) return abs(a*b) // math.gcd(a, b) @@ -538,7 +539,7 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type): if adm_out is not None: x[1] = x[1].copy() - x[1]["adm_encoded"] = torch.cat([adm_out] * batch_size).to(device) + x[1]["adm_encoded"] = comfy.utils.repeat_to_batch_size(adm_out, batch_size).to(device) return conds From 422d16c027009cd6165c86179dad937166de5312 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 21 Sep 2023 22:23:01 -0400 Subject: [PATCH 003/420] Add some nodes to add, subtract and multiply latents. 
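These nodes treat LATENT values as dicts holding a "samples" tensor of shape [batch, channels, height, width]; once the second input has been upscaled and batch-repeated to match the first (the reshape_latent_to helper below), the operations reduce to plain elementwise tensor math. A minimal sketch of that arithmetic, using bare torch tensors in place of full ComfyUI node inputs:

    import torch

    # LATENT inputs are {"samples": tensor} dicts of shape [B, C, H, W]
    a = {"samples": torch.randn(1, 4, 64, 64)}
    b = {"samples": torch.randn(1, 4, 64, 64)}

    added      = {"samples": a["samples"] + b["samples"]}   # LatentAdd
    subtracted = {"samples": a["samples"] - b["samples"]}   # LatentSubtract
    scaled     = {"samples": a["samples"] * 0.5}            # LatentMultiply, multiplier=0.5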
--- comfy_extras/nodes_latent.py | 74 ++++++++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 75 insertions(+) create mode 100644 comfy_extras/nodes_latent.py diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py new file mode 100644 index 00000000000..b1823d7a1e2 --- /dev/null +++ b/comfy_extras/nodes_latent.py @@ -0,0 +1,74 @@ +import comfy.utils + +def reshape_latent_to(target_shape, latent): + if latent.shape[1:] != target_shape[1:]: + latent.movedim(1, -1) + latent = comfy.utils.common_upscale(latent, target_shape[3], target_shape[2], "bilinear", "center") + latent.movedim(-1, 1) + return comfy.utils.repeat_to_batch_size(latent, target_shape[0]) + + +class LatentAdd: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples1, samples2): + samples_out = samples1.copy() + + s1 = samples1["samples"] + s2 = samples2["samples"] + + s2 = reshape_latent_to(s1.shape, s2) + samples_out["samples"] = s1 + s2 + return (samples_out,) + +class LatentSubtract: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples1, samples2): + samples_out = samples1.copy() + + s1 = samples1["samples"] + s2 = samples2["samples"] + + s2 = reshape_latent_to(s1.shape, s2) + samples_out["samples"] = s1 - s2 + return (samples_out,) + +class LatentMuliply: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples": ("LATENT",), + "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples, multiplier): + samples_out = samples.copy() + + s1 = samples["samples"] + samples_out["samples"] = s1 * multiplier + return (samples_out,) + +NODE_CLASS_MAPPINGS = { + "LatentAdd": LatentAdd, + "LatentSubtract": LatentSubtract, + "LatentMuliply": LatentMuliply, +} diff --git a/nodes.py b/nodes.py index 18d82ea8094..6e0d43747a3 100644 --- a/nodes.py +++ b/nodes.py @@ -1772,6 +1772,7 @@ def load_custom_nodes(): print() def init_custom_nodes(): + load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_latent.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py")) From 29ccf9f471e3b2ad4f4a08ba9f34698d357f8547 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 22 Sep 2023 01:33:46 -0400 Subject: [PATCH 004/420] Fix typo. 
--- comfy_extras/nodes_latent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index b1823d7a1e2..001de39fceb 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -48,7 +48,7 @@ def op(self, samples1, samples2): samples_out["samples"] = s1 - s2 return (samples_out,) -class LatentMuliply: +class LatentMultiply: @classmethod def INPUT_TYPES(s): return {"required": { "samples": ("LATENT",), @@ -70,5 +70,5 @@ def op(self, samples, multiplier): NODE_CLASS_MAPPINGS = { "LatentAdd": LatentAdd, "LatentSubtract": LatentSubtract, - "LatentMuliply": LatentMuliply, + "LatentMultiply": LatentMultiply, } From afa2399f79e84919645eb69cd8e1ef1d9f1d6bd1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 22 Sep 2023 20:26:47 -0400 Subject: [PATCH 005/420] Add a way to set output block patches to modify the h and hsp. --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 6 ++++++ comfy/model_patcher.py | 3 +++ 2 files changed, 9 insertions(+) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 3ce3c2e7b9b..b42637c821a 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -608,6 +608,7 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo """ transformer_options["original_shape"] = list(x.shape) transformer_options["current_index"] = 0 + transformer_patches = transformer_options.get("patches", {}) assert (y is not None) == ( self.num_classes is not None @@ -644,6 +645,11 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo if ctrl is not None: hsp += ctrl + if "output_block_patch" in transformer_patches: + patch = transformer_patches["output_block_patch"] + for p in patch: + h, hsp = p(h, hsp, transformer_options) + h = th.cat([h, hsp], dim=1) del hsp if len(hs) > 0: diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 10551656e8a..ba505221e77 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -88,6 +88,9 @@ def set_model_attn1_output_patch(self, patch): def set_model_attn2_output_patch(self, patch): self.set_model_patch(patch, "attn2_output_patch") + def set_model_output_block_patch(self, patch): + self.set_model_patch(patch, "output_block_patch") + def model_patches_to(self, device): to = self.model_options["transformer_options"] if "patches" in to: From eec449ca8e4b3741032f7fed9372ba52040eb563 Mon Sep 17 00:00:00 2001 From: Simon Lui <502929+simonlui@users.noreply.github.com> Date: Fri, 22 Sep 2023 21:11:27 -0700 Subject: [PATCH 006/420] Allow Intel GPUs to LoRA cast on GPU since it supports BF16 natively. --- comfy/model_management.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 1050c13a42a..8b896372687 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -488,6 +488,8 @@ def cast_to_device(tensor, device, dtype, copy=False): elif tensor.dtype == torch.bfloat16: if hasattr(device, 'type') and device.type.startswith("cuda"): device_supports_cast = True + elif is_intel_xpu(): + device_supports_cast = True if device_supports_cast: if copy: From fd93c759e278f832b149bc5b0150a8b437c48c77 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 23 Sep 2023 00:56:09 -0400 Subject: [PATCH 007/420] Implement FreeU: Free Lunch in Diffusion U-Net node. 
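FreeU amplifies the U-Net backbone features (h) while damping the low-frequency band of the skip features (hsp), both applied through the output_block_patch hook added in the previous commit. The frequency-domain half is the interesting part; a standalone sketch of what the Fourier_filter helper below does (threshold is the half-width of the centered low-frequency box, scale multiplies it):

    import torch

    def lowpass_scale(x, threshold=1, scale=0.9):
        # FFT over the spatial dims, shifted so low frequencies sit at the center
        x_freq = torch.fft.fftshift(torch.fft.fftn(x.float(), dim=(-2, -1)), dim=(-2, -1))
        B, C, H, W = x_freq.shape
        mask = torch.ones((B, C, H, W), device=x.device)
        crow, ccol = H // 2, W // 2
        # attenuate only the central (low-frequency) square
        mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale
        x_freq = x_freq * mask
        return torch.fft.ifftn(torch.fft.ifftshift(x_freq, dim=(-2, -1)), dim=(-2, -1)).real.to(x.dtype)

    hsp = torch.randn(2, 640, 32, 32)
    filtered = lowpass_scale(hsp, threshold=1, scale=0.9)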
_for_testing->FreeU --- comfy_extras/nodes_freelunch.py | 56 +++++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 57 insertions(+) create mode 100644 comfy_extras/nodes_freelunch.py diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py new file mode 100644 index 00000000000..535eece399d --- /dev/null +++ b/comfy_extras/nodes_freelunch.py @@ -0,0 +1,56 @@ +#code originally taken from: https://github.com/ChenyangSi/FreeU (under MIT License) + +import torch + + +def Fourier_filter(x, threshold, scale): + # FFT + x_freq = torch.fft.fftn(x.float(), dim=(-2, -1)) + x_freq = torch.fft.fftshift(x_freq, dim=(-2, -1)) + + B, C, H, W = x_freq.shape + mask = torch.ones((B, C, H, W), device=x.device) + + crow, ccol = H // 2, W //2 + mask[..., crow - threshold:crow + threshold, ccol - threshold:ccol + threshold] = scale + x_freq = x_freq * mask + + # IFFT + x_freq = torch.fft.ifftshift(x_freq, dim=(-2, -1)) + x_filtered = torch.fft.ifftn(x_freq, dim=(-2, -1)).real + + return x_filtered.to(x.dtype) + + +class FreeU: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "b1": ("FLOAT", {"default": 1.1, "min": 0.0, "max": 10.0, "step": 0.01}), + "b2": ("FLOAT", {"default": 1.2, "min": 0.0, "max": 10.0, "step": 0.01}), + "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}), + "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, b1, b2, s1, s2): + def output_block_patch(h, hsp, transformer_options): + if h.shape[1] == 1280: + h[:,:640] = h[:,:640] * b1 + hsp = Fourier_filter(hsp, threshold=1, scale=s1) + if h.shape[1] == 640: + h[:,:320] = h[:,:320] * b2 + hsp = Fourier_filter(hsp, threshold=1, scale=s2) + return h, hsp + + m = model.clone() + m.set_model_output_block_patch(output_block_patch) + return (m, ) + + +NODE_CLASS_MAPPINGS = { + "FreeU": FreeU, +} diff --git a/nodes.py b/nodes.py index 6e0d43747a3..115862607da 100644 --- a/nodes.py +++ b/nodes.py @@ -1782,4 +1782,5 @@ def init_custom_nodes(): load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_tomesd.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_clip_sdxl.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_canny.py")) + load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_freelunch.py")) load_custom_nodes() From 05e661e5efb64803ff9d27191185159081a05297 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 23 Sep 2023 12:19:08 -0400 Subject: [PATCH 008/420] FreeU now works with the refiner. 
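The first version hardcoded the 1280- and 640-channel checks, which only match U-Nets built with model_channels == 320 (SD1.x and SDXL base); the refiner uses a different base width. Keying the scale table off model_channels makes the lookup follow whatever width the loaded model reports. A sketch of the resulting mapping (the 320 below is illustrative):

    model_channels = 320                  # read from the model's unet_config
    b1, b2, s1, s2 = 1.1, 1.2, 0.9, 0.2   # the node's default strengths

    scale_dict = {
        model_channels * 4: (b1, s1),     # deepest output blocks (1280 for SD1.x)
        model_channels * 2: (b2, s2),     # one stage shallower (640 for SD1.x)
    }

    scale = scale_dict.get(1280)          # -> (1.1, 0.9); None for unpatched stages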
--- comfy_extras/nodes_freelunch.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index 535eece399d..c3542a7a431 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -37,13 +37,13 @@ def INPUT_TYPES(s): CATEGORY = "_for_testing" def patch(self, model, b1, b2, s1, s2): + model_channels = model.model.model_config.unet_config["model_channels"] + scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)} def output_block_patch(h, hsp, transformer_options): - if h.shape[1] == 1280: - h[:,:640] = h[:,:640] * b1 - hsp = Fourier_filter(hsp, threshold=1, scale=s1) - if h.shape[1] == 640: - h[:,:320] = h[:,:320] * b2 - hsp = Fourier_filter(hsp, threshold=1, scale=s2) + scale = scale_dict.get(h.shape[1], None) + if scale is not None: + h[:,:h.shape[1] // 2] = h[:,:h.shape[1] // 2] * scale[0] + hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) return h, hsp m = model.clone() From 76cdc809bfe562dc1026784f26ae0b9582016d6b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 23 Sep 2023 18:47:46 -0400 Subject: [PATCH 009/420] Support more controlnet models. --- comfy/controlnet.py | 2 +- comfy/model_detection.py | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index af0df103e81..ea219c7e560 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -354,7 +354,7 @@ def load_controlnet(ckpt_path, model=None): if controlnet_config is None: use_fp16 = comfy.model_management.should_use_fp16() - controlnet_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16).unet_config + controlnet_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16, True).unet_config controlnet_config.pop("out_channels") controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1] control_model = comfy.cldm.cldm.ControlNet(**controlnet_config) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 372d5a2df48..787c78575ae 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -1,5 +1,5 @@ - -from . 
import supported_models +import comfy.supported_models +import comfy.supported_models_base def count_blocks(state_dict_keys, prefix_string): count = 0 @@ -109,17 +109,20 @@ def detect_unet_config(state_dict, key_prefix, use_fp16): return unet_config def model_config_from_unet_config(unet_config): - for model_config in supported_models.models: + for model_config in comfy.supported_models.models: if model_config.matches(unet_config): return model_config(unet_config) print("no match", unet_config) return None -def model_config_from_unet(state_dict, unet_key_prefix, use_fp16): +def model_config_from_unet(state_dict, unet_key_prefix, use_fp16, use_base_if_no_match=False): unet_config = detect_unet_config(state_dict, unet_key_prefix, use_fp16) - return model_config_from_unet_config(unet_config) - + model_config = model_config_from_unet_config(unet_config) + if model_config is None and use_base_if_no_match: + return comfy.supported_models_base.BASE(unet_config) + else: + return model_config def unet_config_from_diffusers_unet(state_dict, use_fp16): match = {} From 593b7069e7cc3bf6ce8283849c65280369e4414b Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Sun, 24 Sep 2023 12:08:54 -0300 Subject: [PATCH 010/420] Proportional scale latent and image --- nodes.py | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/nodes.py b/nodes.py index 115862607da..0882185a42d 100644 --- a/nodes.py +++ b/nodes.py @@ -967,8 +967,8 @@ class LatentUpscale: @classmethod def INPUT_TYPES(s): return {"required": { "samples": ("LATENT",), "upscale_method": (s.upscale_methods,), - "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}), + "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8}), "crop": (s.crop_methods,)}} RETURN_TYPES = ("LATENT",) FUNCTION = "upscale" @@ -976,8 +976,22 @@ def INPUT_TYPES(s): CATEGORY = "latent" def upscale(self, samples, upscale_method, width, height, crop): - s = samples.copy() - s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop) + if width == 0 and height == 0: + s = samples + else: + s = samples.copy() + + if width == 0: + height = max(64, height) + width = max(64, round(samples["samples"].shape[3] * height / samples["samples"].shape[2])) + elif height == 0: + width = max(64, width) + height = max(64, round(samples["samples"].shape[2] * width / samples["samples"].shape[3])) + else: + width = max(64, width) + height = max(64, height) + + s["samples"] = comfy.utils.common_upscale(samples["samples"], width // 8, height // 8, upscale_method, crop) return (s,) class LatentUpscaleBy: @@ -1429,8 +1443,8 @@ class ImageScale: @classmethod def INPUT_TYPES(s): return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,), - "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), - "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + "width": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "height": ("INT", {"default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 1}), "crop": (s.crop_methods,)}} RETURN_TYPES = ("IMAGE",) FUNCTION = "upscale" @@ -1438,9 +1452,18 @@ def INPUT_TYPES(s): CATEGORY = "image/upscaling" def upscale(self, image, upscale_method, width, height, crop): - 
samples = image.movedim(-1,1) - s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop) - s = s.movedim(1,-1) + if width == 0 and height == 0: + s = image + else: + samples = image.movedim(-1,1) + + if width == 0: + width = max(1, round(samples.shape[3] * height / samples.shape[2])) + elif height == 0: + height = max(1, round(samples.shape[2] * width / samples.shape[3])) + + s = comfy.utils.common_upscale(samples, width, height, upscale_method, crop) + s = s.movedim(1,-1) return (s,) class ImageScaleBy: From 77c124c5a17534e347bdebbc1ace807d61416147 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 24 Sep 2023 13:27:57 -0400 Subject: [PATCH 011/420] Fix typo. --- nodes.py | 2 +- web/scripts/app.js | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 115862607da..4739977e413 100644 --- a/nodes.py +++ b/nodes.py @@ -1604,7 +1604,7 @@ def expand_image(self, image, left, top, right, bottom, feathering): "ImageBatch": ImageBatch, "ImagePadForOutpaint": ImagePadForOutpaint, "EmptyImage": EmptyImage, - "ConditioningAverage ": ConditioningAverage , + "ConditioningAverage": ConditioningAverage , "ConditioningCombine": ConditioningCombine, "ConditioningConcat": ConditioningConcat, "ConditioningSetArea": ConditioningSetArea, diff --git a/web/scripts/app.js b/web/scripts/app.js index 5efe08c0055..b41c12b8669 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1322,6 +1322,7 @@ export class ComfyApp { for (let n of graphData.nodes) { // Patch T2IAdapterLoader to ControlNetLoader since they are the same node now if (n.type == "T2IAdapterLoader") n.type = "ControlNetLoader"; + if (n.type == "ConditioningAverage ") n.type = "ConditioningAverage"; //typo fix // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { From f00471cdc8f92c930436cf288f1c12119f638a67 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 24 Sep 2023 18:09:44 -0400 Subject: [PATCH 012/420] Do FreeU fft on CPU if the device doesn't support fft functions. --- comfy_extras/nodes_freelunch.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index c3542a7a431..07a88bd9614 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -39,11 +39,22 @@ def INPUT_TYPES(s): def patch(self, model, b1, b2, s1, s2): model_channels = model.model.model_config.unet_config["model_channels"] scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)} + on_cpu_devices = {} + def output_block_patch(h, hsp, transformer_options): scale = scale_dict.get(h.shape[1], None) if scale is not None: h[:,:h.shape[1] // 2] = h[:,:h.shape[1] // 2] * scale[0] - hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) + if hsp.device not in on_cpu_devices: + try: + hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) + except: + print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.") + on_cpu_devices[hsp.device] = True + hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) + else: + hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) + return h, hsp m = model.clone() From 42f6d1ebe2b1f53bf491edeac8ca18fd21a12d37 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 25 Sep 2023 01:21:28 -0400 Subject: [PATCH 013/420] Increase maximum batch sizes of empty image nodes. 
--- nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index 4739977e413..fbe1ee1cb1e 100644 --- a/nodes.py +++ b/nodes.py @@ -891,7 +891,7 @@ def __init__(self, device="cpu"): def INPUT_TYPES(s): return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}), "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 64})}} + "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024})}} RETURN_TYPES = ("LATENT",) FUNCTION = "generate" @@ -1503,7 +1503,7 @@ def __init__(self, device="cpu"): def INPUT_TYPES(s): return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024}), "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}), }} RETURN_TYPES = ("IMAGE",) From 2381d36e6db8e8150e42ff2ede628db5b00ae26f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 25 Sep 2023 01:46:44 -0400 Subject: [PATCH 014/420] 1024 wasn't enough. --- nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index fbe1ee1cb1e..04d9ae2cacf 100644 --- a/nodes.py +++ b/nodes.py @@ -891,7 +891,7 @@ def __init__(self, device="cpu"): def INPUT_TYPES(s): return {"required": { "width": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}), "height": ("INT", {"default": 512, "min": 16, "max": MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024})}} + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}} RETURN_TYPES = ("LATENT",) FUNCTION = "generate" @@ -1503,7 +1503,7 @@ def __init__(self, device="cpu"): def INPUT_TYPES(s): return {"required": { "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 1024}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), "color": ("INT", {"default": 0, "min": 0, "max": 0xFFFFFF, "step": 1, "display": "color"}), }} RETURN_TYPES = ("IMAGE",) From 046b4fe0eebffb2e48b1ea9ab5d245a56b2e4c49 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 25 Sep 2023 16:02:21 -0400 Subject: [PATCH 015/420] Support batches of masks in mask composite nodes. 
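composite() now tiles the source (and its mask) along the batch dimension until they match the destination, so a single mask or a shorter mask batch can drive a full image/latent batch. A sketch of the repeat-to-batch behavior being relied on; this standalone helper mirrors what comfy.utils.repeat_to_batch_size is assumed to do:

    import torch

    def repeat_to_batch_size(tensor, batch_size):
        if tensor.shape[0] > batch_size:       # too many: truncate
            return tensor[:batch_size]
        if tensor.shape[0] < batch_size:       # too few: tile, then truncate
            reps = -(-batch_size // tensor.shape[0])  # ceiling division
            return tensor.repeat([reps] + [1] * (tensor.ndim - 1))[:batch_size]
        return tensor

    mask = torch.rand(1, 1, 64, 64)
    print(repeat_to_batch_size(mask, 4).shape)   # torch.Size([4, 1, 64, 64])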
--- comfy_extras/nodes_mask.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 43f623a62d2..b4c658a7a57 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -1,6 +1,7 @@ import numpy as np from scipy.ndimage import grey_dilation import torch +import comfy.utils from nodes import MAX_RESOLUTION @@ -8,6 +9,8 @@ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_sou if resize_source: source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear") + source = comfy.utils.repeat_to_batch_size(source, destination.shape[0]) + x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier)) y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier)) @@ -18,8 +21,8 @@ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_sou mask = torch.ones_like(source) else: mask = mask.clone() - mask = torch.nn.functional.interpolate(mask[None, None], size=(source.shape[2], source.shape[3]), mode="bilinear") - mask = mask.repeat((source.shape[0], source.shape[1], 1, 1)) + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear") + mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0]) # calculate the bounds of the source that will be overlapping the destination # this prevents the source trying to overwrite latent pixels that are out of bounds @@ -122,7 +125,7 @@ def INPUT_TYPES(s): def image_to_mask(self, image, channel): channels = ["red", "green", "blue"] - mask = image[0, :, :, channels.index(channel)] + mask = image[:, :, :, channels.index(channel)] return (mask,) class ImageColorToMask: From d2cec6cdbf5361413ddf624c72b0b9b2a7a156ee Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 25 Sep 2023 16:19:13 -0400 Subject: [PATCH 016/420] Make mask functions work with batches of masks and images. 
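The recurring trick in this commit is mask.reshape((-1, H, W)): masks can arrive either as a bare [H, W] tensor or as a [B, H, W] batch, and the leading -1 normalizes both to batch form so every operation can index [:, y, x] uniformly. A minimal illustration:

    import torch

    single = torch.zeros(64, 64)      # bare mask, [H, W]
    batch  = torch.zeros(4, 64, 64)   # mask batch, [B, H, W]

    def to_batch(mask):
        return mask.reshape((-1, mask.shape[-2], mask.shape[-1]))

    print(to_batch(single).shape)  # torch.Size([1, 64, 64])
    print(to_batch(batch).shape)   # torch.Size([4, 64, 64])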
--- comfy_extras/nodes_mask.py | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index b4c658a7a57..8f87e4cd872 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -144,8 +144,8 @@ def INPUT_TYPES(s): FUNCTION = "image_to_mask" def image_to_mask(self, image, color): - temp = (torch.clamp(image[0], 0, 1.0) * 255.0).round().to(torch.int) - temp = torch.bitwise_left_shift(temp[:,:,0], 16) + torch.bitwise_left_shift(temp[:,:,1], 8) + temp[:,:,2] + temp = (torch.clamp(image, 0, 1.0) * 255.0).round().to(torch.int) + temp = torch.bitwise_left_shift(temp[:,:,:,0], 16) + torch.bitwise_left_shift(temp[:,:,:,1], 8) + temp[:,:,:,2] mask = torch.where(temp == color, 255, 0).float() return (mask,) @@ -167,7 +167,7 @@ def INPUT_TYPES(cls): FUNCTION = "solid" def solid(self, value, width, height): - out = torch.full((height, width), value, dtype=torch.float32, device="cpu") + out = torch.full((1, height, width), value, dtype=torch.float32, device="cpu") return (out,) class InvertMask: @@ -209,7 +209,8 @@ def INPUT_TYPES(cls): FUNCTION = "crop" def crop(self, mask, x, y, width, height): - out = mask[y:y + height, x:x + width] + mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])) + out = mask[:, y:y + height, x:x + width] return (out,) class MaskComposite: @@ -232,27 +233,28 @@ def INPUT_TYPES(cls): FUNCTION = "combine" def combine(self, destination, source, x, y, operation): - output = destination.clone() + output = destination.reshape((-1, destination.shape[-2], destination.shape[-1])).clone() + source = source.reshape((-1, source.shape[-2], source.shape[-1])) left, top = (x, y,) - right, bottom = (min(left + source.shape[1], destination.shape[1]), min(top + source.shape[0], destination.shape[0])) + right, bottom = (min(left + source.shape[-1], destination.shape[-1]), min(top + source.shape[-2], destination.shape[-2])) visible_width, visible_height = (right - left, bottom - top,) source_portion = source[:visible_height, :visible_width] destination_portion = destination[top:bottom, left:right] if operation == "multiply": - output[top:bottom, left:right] = destination_portion * source_portion + output[:, top:bottom, left:right] = destination_portion * source_portion elif operation == "add": - output[top:bottom, left:right] = destination_portion + source_portion + output[:, top:bottom, left:right] = destination_portion + source_portion elif operation == "subtract": - output[top:bottom, left:right] = destination_portion - source_portion + output[:, top:bottom, left:right] = destination_portion - source_portion elif operation == "and": - output[top:bottom, left:right] = torch.bitwise_and(destination_portion.round().bool(), source_portion.round().bool()).float() + output[:, top:bottom, left:right] = torch.bitwise_and(destination_portion.round().bool(), source_portion.round().bool()).float() elif operation == "or": - output[top:bottom, left:right] = torch.bitwise_or(destination_portion.round().bool(), source_portion.round().bool()).float() + output[:, top:bottom, left:right] = torch.bitwise_or(destination_portion.round().bool(), source_portion.round().bool()).float() elif operation == "xor": - output[top:bottom, left:right] = torch.bitwise_xor(destination_portion.round().bool(), source_portion.round().bool()).float() + output[:, top:bottom, left:right] = torch.bitwise_xor(destination_portion.round().bool(), source_portion.round().bool()).float() output = torch.clamp(output, 
0.0, 1.0) @@ -278,7 +280,7 @@ def INPUT_TYPES(cls): FUNCTION = "feather" def feather(self, mask, left, top, right, bottom): - output = mask.clone() + output = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).clone() left = min(left, output.shape[1]) right = min(right, output.shape[1]) @@ -287,19 +289,19 @@ def feather(self, mask, left, top, right, bottom): for x in range(left): feather_rate = (x + 1.0) / left - output[:, x] *= feather_rate + output[:, :, x] *= feather_rate for x in range(right): feather_rate = (x + 1) / right - output[:, -x] *= feather_rate + output[:, :, -x] *= feather_rate for y in range(top): feather_rate = (y + 1) / top - output[y, :] *= feather_rate + output[:, y, :] *= feather_rate for y in range(bottom): feather_rate = (y + 1) / bottom - output[-y, :] *= feather_rate + output[:, -y, :] *= feather_rate return (output,) From e0efa78b710d0bd213e8f22220fd53c9421906d8 Mon Sep 17 00:00:00 2001 From: Michael Poutre Date: Mon, 25 Sep 2023 21:20:51 -0700 Subject: [PATCH 017/420] chore(CI): Update test-build to use updated version of actions --- .github/workflows/test-build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index 421dd5ee492..444d6b2548c 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -20,9 +20,9 @@ jobs: matrix: python-version: ["3.8", "3.9", "3.10", "3.11"] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies From d76d71de3fc5e9618226c53f5a4a1a1a6c14b4fe Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 26 Sep 2023 02:45:31 -0400 Subject: [PATCH 018/420] GrowMask can now be used with negative numbers to erode it. --- comfy_extras/nodes_mask.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 8f87e4cd872..aa13cac012c 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -1,5 +1,5 @@ import numpy as np -from scipy.ndimage import grey_dilation +import scipy.ndimage import torch import comfy.utils @@ -311,7 +311,7 @@ def INPUT_TYPES(cls): return { "required": { "mask": ("MASK",), - "expand": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}), "tapered_corners": ("BOOLEAN", {"default": True}), }, } @@ -328,8 +328,11 @@ def expand_mask(self, mask, expand, tapered_corners): [1, 1, 1], [c, 1, c]]) output = mask.numpy().copy() + while expand < 0: + output = scipy.ndimage.grey_erosion(output, footprint=kernel) + expand += 1 while expand > 0: - output = grey_dilation(output, footprint=kernel) + output = scipy.ndimage.grey_dilation(output, footprint=kernel) expand -= 1 output = torch.from_numpy(output) return (output,) From 1d36dfb9fe025b716bc66d920b996381f457393d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 26 Sep 2023 02:53:57 -0400 Subject: [PATCH 019/420] GrowMask now works with mask batches. 
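GrowMask applies scipy's grey-scale morphology one mask at a time: a positive expand runs grey_dilation once per step, a negative expand runs grey_erosion, and tapered_corners zeroes the footprint's corners. A standalone sketch of a single grow/shrink step:

    import numpy as np
    import scipy.ndimage
    import torch

    mask = torch.zeros(8, 8)
    mask[3:5, 3:5] = 1.0

    c = 0  # tapered corners; set to 1 for a full 3x3 square footprint
    kernel = np.array([[c, 1, c],
                       [1, 1, 1],
                       [c, 1, c]])

    grown  = scipy.ndimage.grey_dilation(mask.numpy(), footprint=kernel)  # expand = 1
    shrunk = scipy.ndimage.grey_erosion(mask.numpy(), footprint=kernel)   # expand = -1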
--- comfy_extras/nodes_mask.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index aa13cac012c..af7cb07bfb3 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -327,15 +327,19 @@ def expand_mask(self, mask, expand, tapered_corners): kernel = np.array([[c, 1, c], [1, 1, 1], [c, 1, c]]) - output = mask.numpy().copy() - while expand < 0: - output = scipy.ndimage.grey_erosion(output, footprint=kernel) - expand += 1 - while expand > 0: - output = scipy.ndimage.grey_dilation(output, footprint=kernel) - expand -= 1 - output = torch.from_numpy(output) - return (output,) + mask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])) + out = [] + for m in mask: + output = m.numpy() + while expand < 0: + output = scipy.ndimage.grey_erosion(output, footprint=kernel) + expand += 1 + while expand > 0: + output = scipy.ndimage.grey_dilation(output, footprint=kernel) + expand -= 1 + output = torch.from_numpy(output) + out.append(output) + return (torch.cat(out, dim=0),) From 9546a798fba3c9fc9b6aee26cef46674a184727c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 26 Sep 2023 02:56:40 -0400 Subject: [PATCH 020/420] Make LoadImage and LoadImageMask return masks in batch format. --- nodes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index 8a28e127ee7..4abb0d24d74 100644 --- a/nodes.py +++ b/nodes.py @@ -1369,7 +1369,7 @@ def load_image(self, image): mask = 1. - torch.from_numpy(mask) else: mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") - return (image, mask) + return (image, mask.unsqueeze(0)) @classmethod def IS_CHANGED(s, image): @@ -1416,7 +1416,7 @@ def load_image(self, image, channel): mask = 1. - mask else: mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") - return (mask,) + return (mask.unsqueeze(0),) @classmethod def IS_CHANGED(s, image, channel): From 446caf711c9e9ae4cdced65bf3609095b26fcde0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 26 Sep 2023 13:45:15 -0400 Subject: [PATCH 021/420] Sampling code refactor. 
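Every sampler now hides behind one small interface: an object whose sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) method returns the final latent, with max_denoise() deciding whether the initial noise should cover the full sigma range. That turns samplers into plain values that can be constructed (ksampler(name)) and passed around. A sketch of a custom sampler written against this interface; the bare Euler loop is for illustration only and skips the inpaint-mask wrapping the built-in samplers do:

    import math
    import torch

    class SketchEulerSampler:
        def max_denoise(self, model_wrap, sigmas):
            return math.isclose(float(model_wrap.sigma_max), float(sigmas[0]), rel_tol=1e-05)

        def sample(self, model_wrap, sigmas, extra_args, callback, noise,
                   latent_image=None, denoise_mask=None, disable_pbar=False):
            x = noise * sigmas[0]
            if latent_image is not None:
                x = x + latent_image
            s_in = x.new_ones([x.shape[0]])
            for i in range(len(sigmas) - 1):
                denoised = model_wrap(x, sigmas[i] * s_in, **extra_args)
                d = (x - denoised) / sigmas[i]               # step toward the prediction
                x = x + d * (sigmas[i + 1] - sigmas[i])
                if callback is not None:
                    callback(i, denoised, x, len(sigmas) - 1)
            return x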
--- comfy/ldm/models/diffusion/ddim.py | 2 +- comfy/samplers.py | 261 ++++++++++++++++------------- 2 files changed, 150 insertions(+), 113 deletions(-) diff --git a/comfy/ldm/models/diffusion/ddim.py b/comfy/ldm/models/diffusion/ddim.py index befab0075ca..433d48e3064 100644 --- a/comfy/ldm/models/diffusion/ddim.py +++ b/comfy/ldm/models/diffusion/ddim.py @@ -59,7 +59,7 @@ def make_schedule_timesteps(self, ddim_timesteps, ddim_eta=0., verbose=True): @torch.no_grad() def sample_custom(self, ddim_timesteps, - conditioning, + conditioning=None, callback=None, img_callback=None, quantize_x0=False, diff --git a/comfy/samplers.py b/comfy/samplers.py index e3192ca58f4..9afde9da7a4 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -544,11 +544,152 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type): return conds +class Sampler: + def sample(self): + pass + + def max_denoise(self, model_wrap, sigmas): + return math.isclose(float(model_wrap.sigma_max), float(sigmas[0])) + +class DDIM(Sampler): + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + timesteps = [] + for s in range(sigmas.shape[0]): + timesteps.insert(0, model_wrap.sigma_to_discrete_timestep(sigmas[s])) + noise_mask = None + if denoise_mask is not None: + noise_mask = 1.0 - denoise_mask + + ddim_callback = None + if callback is not None: + total_steps = len(timesteps) - 1 + ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None, total_steps) + + max_denoise = self.max_denoise(model_wrap, sigmas) + + ddim_sampler = DDIMSampler(model_wrap.inner_model.inner_model, device=noise.device) + ddim_sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False) + z_enc = ddim_sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(noise.device), noise=noise, max_denoise=max_denoise) + samples, _ = ddim_sampler.sample_custom(ddim_timesteps=timesteps, + batch_size=noise.shape[0], + shape=noise.shape[1:], + verbose=False, + eta=0.0, + x_T=z_enc, + x0=latent_image, + img_callback=ddim_callback, + denoise_function=model_wrap.predict_eps_discrete_timestep, + extra_args=extra_args, + mask=noise_mask, + to_zero=sigmas[-1]==0, + end_step=sigmas.shape[0] - 1, + disable_pbar=disable_pbar) + return samples + +class UNIPC(Sampler): + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) + +class UNIPCBH2(Sampler): + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) + +KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", + "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", + "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm"] + +def ksampler(sampler_name): + class KSAMPLER(Sampler): + def sample(self, model_wrap, sigmas, extra_args, callback, noise, 
latent_image=None, denoise_mask=None, disable_pbar=False): + extra_args["denoise_mask"] = denoise_mask + model_k = KSamplerX0Inpaint(model_wrap) + model_k.latent_image = latent_image + model_k.noise = noise + + if self.max_denoise(model_wrap, sigmas): + noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) + else: + noise = noise * sigmas[0] + + k_callback = None + total_steps = len(sigmas) - 1 + if callback is not None: + k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps) + + sigma_min = sigmas[-1] + if sigma_min == 0: + sigma_min = sigmas[-2] + + if latent_image is not None: + noise += latent_image + if sampler_name == "dpm_fast": + samples = k_diffusion_sampling.sample_dpm_fast(model_k, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar) + elif sampler_name == "dpm_adaptive": + samples = k_diffusion_sampling.sample_dpm_adaptive(model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar) + else: + samples = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar) + return samples + return KSAMPLER + + +def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model_options={}, latent_image=None, denoise_mask=None, callback=None, disable_pbar=False, seed=None): + positive = positive[:] + negative = negative[:] + + resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], device) + resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], device) + + model_denoise = CFGNoisePredictor(model) + if model.model_type == model_base.ModelType.V_PREDICTION: + model_wrap = CompVisVDenoiser(model_denoise, quantize=True) + else: + model_wrap = k_diffusion_external.CompVisDenoiser(model_denoise, quantize=True) + + calculate_start_end_timesteps(model_wrap, negative) + calculate_start_end_timesteps(model_wrap, positive) + + #make sure each cond area has an opposite one with the same area + for c in positive: + create_cond_with_same_area_if_none(negative, c) + for c in negative: + create_cond_with_same_area_if_none(positive, c) + + pre_run_control(model_wrap, negative + positive) + + apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) + apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) + + if model.is_adm(): + positive = encode_adm(model, positive, noise.shape[0], noise.shape[3], noise.shape[2], device, "positive") + negative = encode_adm(model, negative, noise.shape[0], noise.shape[3], noise.shape[2], device, "negative") + + if latent_image is not None: + latent_image = model.process_latent_in(latent_image) + + extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} + + cond_concat = None + if hasattr(model, 'concat_keys'): #inpaint + cond_concat = [] + for ck in model.concat_keys: + if denoise_mask is not None: + if ck == "mask": + cond_concat.append(denoise_mask[:,:1]) + elif ck == "masked_image": + cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space + else: + if ck == "mask": + cond_concat.append(torch.ones_like(noise)[:,:1]) + elif ck == "masked_image": + cond_concat.append(blank_inpaint_image_like(noise)) + extra_args["cond_concat"] = cond_concat + + samples = 
sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) + return model.process_latent_out(samples.to(torch.float32)) + class KSampler: SCHEDULERS = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"] - SAMPLERS = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", - "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", - "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "ddim", "uni_pc", "uni_pc_bh2"] + SAMPLERS = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"] def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None, model_options={}): self.model = model @@ -628,117 +769,13 @@ def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=N else: return torch.zeros_like(noise) - positive = positive[:] - negative = negative[:] - - resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], self.device) - resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], self.device) - - calculate_start_end_timesteps(self.model_wrap, negative) - calculate_start_end_timesteps(self.model_wrap, positive) - - #make sure each cond area has an opposite one with the same area - for c in positive: - create_cond_with_same_area_if_none(negative, c) - for c in negative: - create_cond_with_same_area_if_none(positive, c) - - pre_run_control(self.model_wrap, negative + positive) - - apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) - apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) - - if self.model.is_adm(): - positive = encode_adm(self.model, positive, noise.shape[0], noise.shape[3], noise.shape[2], self.device, "positive") - negative = encode_adm(self.model, negative, noise.shape[0], noise.shape[3], noise.shape[2], self.device, "negative") - - if latent_image is not None: - latent_image = self.model.process_latent_in(latent_image) - - extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": self.model_options, "seed":seed} - - cond_concat = None - if hasattr(self.model, 'concat_keys'): #inpaint - cond_concat = [] - for ck in self.model.concat_keys: - if denoise_mask is not None: - if ck == "mask": - cond_concat.append(denoise_mask[:,:1]) - elif ck == "masked_image": - cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space - else: - if ck == "mask": - cond_concat.append(torch.ones_like(noise)[:,:1]) - elif ck == "masked_image": - cond_concat.append(blank_inpaint_image_like(noise)) - extra_args["cond_concat"] = cond_concat - - if sigmas[0] != self.sigmas[0] or (self.denoise is not None and self.denoise < 1.0): - max_denoise = False - else: - max_denoise = True - - if self.sampler == "uni_pc": - samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) + sampler = UNIPC elif self.sampler == "uni_pc_bh2": - samples = uni_pc.sample_unipc(self.model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=max_denoise, extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) + sampler = UNIPCBH2 elif self.sampler 
== "ddim": - timesteps = [] - for s in range(sigmas.shape[0]): - timesteps.insert(0, self.model_wrap.sigma_to_discrete_timestep(sigmas[s])) - noise_mask = None - if denoise_mask is not None: - noise_mask = 1.0 - denoise_mask - - ddim_callback = None - if callback is not None: - total_steps = len(timesteps) - 1 - ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None, total_steps) - - sampler = DDIMSampler(self.model, device=self.device) - sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False) - z_enc = sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(self.device), noise=noise, max_denoise=max_denoise) - samples, _ = sampler.sample_custom(ddim_timesteps=timesteps, - conditioning=positive, - batch_size=noise.shape[0], - shape=noise.shape[1:], - verbose=False, - unconditional_guidance_scale=cfg, - unconditional_conditioning=negative, - eta=0.0, - x_T=z_enc, - x0=latent_image, - img_callback=ddim_callback, - denoise_function=self.model_wrap.predict_eps_discrete_timestep, - extra_args=extra_args, - mask=noise_mask, - to_zero=sigmas[-1]==0, - end_step=sigmas.shape[0] - 1, - disable_pbar=disable_pbar) - + sampler = DDIM else: - extra_args["denoise_mask"] = denoise_mask - self.model_k.latent_image = latent_image - self.model_k.noise = noise - - if max_denoise: - noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) - else: - noise = noise * sigmas[0] - - k_callback = None - total_steps = len(sigmas) - 1 - if callback is not None: - k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps) - - if latent_image is not None: - noise += latent_image - if self.sampler == "dpm_fast": - samples = k_diffusion_sampling.sample_dpm_fast(self.model_k, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar) - elif self.sampler == "dpm_adaptive": - samples = k_diffusion_sampling.sample_dpm_adaptive(self.model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar) - else: - samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar) + sampler = ksampler(self.sampler) - return self.model.process_latent_out(samples.to(torch.float32)) + return sample(self.model, noise, positive, negative, cfg, self.device, sampler(), sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) From 1d6dd8318463e896abf9f99cf5381438ee64d302 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 26 Sep 2023 16:25:34 -0400 Subject: [PATCH 022/420] Scheduler code refactor. 
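With the schedule logic pulled out of KSampler, a sigma schedule is now an ordinary value that can be computed directly. A sketch of the new entry point; it is assumed here that `model` is the inner diffusion model (what KSampler itself receives, i.e. a ModelPatcher's .model), since wrap_model() reads model.model_type:

    import comfy.samplers

    # 20 steps on a karras schedule -> 21 sigmas (the last one is 0)
    sigmas = comfy.samplers.calculate_sigmas_scheduler(model, "karras", 20)
    print(sigmas.shape, float(sigmas[-1]))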
--- comfy/samplers.py | 66 +++++++++++++++++++++++------------------------ 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 9afde9da7a4..7668d7913c7 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -549,7 +549,7 @@ def sample(self): pass def max_denoise(self, model_wrap, sigmas): - return math.isclose(float(model_wrap.sigma_max), float(sigmas[0])) + return math.isclose(float(model_wrap.sigma_max), float(sigmas[0]), rel_tol=1e-05) class DDIM(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): @@ -631,6 +631,13 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N return samples return KSAMPLER +def wrap_model(model): + model_denoise = CFGNoisePredictor(model) + if model.model_type == model_base.ModelType.V_PREDICTION: + model_wrap = CompVisVDenoiser(model_denoise, quantize=True) + else: + model_wrap = k_diffusion_external.CompVisDenoiser(model_denoise, quantize=True) + return model_wrap def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model_options={}, latent_image=None, denoise_mask=None, callback=None, disable_pbar=False, seed=None): positive = positive[:] @@ -639,11 +646,7 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], device) resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], device) - model_denoise = CFGNoisePredictor(model) - if model.model_type == model_base.ModelType.V_PREDICTION: - model_wrap = CompVisVDenoiser(model_denoise, quantize=True) - else: - model_wrap = k_diffusion_external.CompVisDenoiser(model_denoise, quantize=True) + model_wrap = wrap_model(model) calculate_start_end_timesteps(model_wrap, negative) calculate_start_end_timesteps(model_wrap, positive) @@ -687,19 +690,33 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) return model.process_latent_out(samples.to(torch.float32)) +SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"] +SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"] + +def calculate_sigmas_scheduler(model, scheduler_name, steps): + model_wrap = wrap_model(model) + if scheduler_name == "karras": + sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model_wrap.sigma_min), sigma_max=float(model_wrap.sigma_max)) + elif scheduler_name == "exponential": + sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=float(model_wrap.sigma_min), sigma_max=float(model_wrap.sigma_max)) + elif scheduler_name == "normal": + sigmas = model_wrap.get_sigmas(steps) + elif scheduler_name == "simple": + sigmas = simple_scheduler(model_wrap, steps) + elif scheduler_name == "ddim_uniform": + sigmas = ddim_scheduler(model_wrap, steps) + elif scheduler_name == "sgm_uniform": + sigmas = sgm_scheduler(model_wrap, steps) + else: + print("error invalid scheduler", self.scheduler) + return sigmas + class KSampler: - SCHEDULERS = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"] - SAMPLERS = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"] + SCHEDULERS = SCHEDULER_NAMES + SAMPLERS = SAMPLER_NAMES def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=None, 
model_options={}): self.model = model - self.model_denoise = CFGNoisePredictor(self.model) - if self.model.model_type == model_base.ModelType.V_PREDICTION: - self.model_wrap = CompVisVDenoiser(self.model_denoise, quantize=True) - else: - self.model_wrap = k_diffusion_external.CompVisDenoiser(self.model_denoise, quantize=True) - - self.model_k = KSamplerX0Inpaint(self.model_wrap) self.device = device if scheduler not in self.SCHEDULERS: scheduler = self.SCHEDULERS[0] @@ -707,8 +724,6 @@ def __init__(self, model, steps, device, sampler=None, scheduler=None, denoise=N sampler = self.SAMPLERS[0] self.scheduler = scheduler self.sampler = sampler - self.sigma_min=float(self.model_wrap.sigma_min) - self.sigma_max=float(self.model_wrap.sigma_max) self.set_steps(steps, denoise) self.denoise = denoise self.model_options = model_options @@ -721,20 +736,7 @@ def calculate_sigmas(self, steps): steps += 1 discard_penultimate_sigma = True - if self.scheduler == "karras": - sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max) - elif self.scheduler == "exponential": - sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=self.sigma_min, sigma_max=self.sigma_max) - elif self.scheduler == "normal": - sigmas = self.model_wrap.get_sigmas(steps) - elif self.scheduler == "simple": - sigmas = simple_scheduler(self.model_wrap, steps) - elif self.scheduler == "ddim_uniform": - sigmas = ddim_scheduler(self.model_wrap, steps) - elif self.scheduler == "sgm_uniform": - sigmas = sgm_scheduler(self.model_wrap, steps) - else: - print("error invalid scheduler", self.scheduler) + sigmas = calculate_sigmas_scheduler(self.model, self.scheduler, steps) if discard_penultimate_sigma: sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) @@ -752,10 +754,8 @@ def set_steps(self, steps, denoise=None): def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=None, last_step=None, force_full_denoise=False, denoise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None): if sigmas is None: sigmas = self.sigmas - sigma_min = self.sigma_min if last_step is not None and last_step < (len(sigmas) - 1): - sigma_min = sigmas[last_step] sigmas = sigmas[:last_step + 1] if force_full_denoise: sigmas[-1] = 0 From fff491b03289ac954eb465b9a57b30f695259c41 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 27 Sep 2023 12:04:07 -0400 Subject: [PATCH 023/420] Model patches can now know which batch is positive and negative. --- comfy/ldm/modules/attention.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 34484b288b4..fcae6b66a79 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -538,6 +538,8 @@ def _forward(self, x, context=None, transformer_options={}): if "block" in transformer_options: block = transformer_options["block"] extra_options["block"] = block + if "cond_or_uncond" in transformer_options: + extra_options["cond_or_uncond"] = transformer_options["cond_or_uncond"] if "patches" in transformer_options: transformer_patches = transformer_options["patches"] else: From bf3fc2f1b7f5b5cf684246be84838e6fc19aeb06 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 27 Sep 2023 16:45:22 -0400 Subject: [PATCH 024/420] Refactor sampling related code. 
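The preview-callback boilerplate that common_ksampler used to build inline now lives in latent_preview.prepare_callback, and prepare_sampling centralizes model/conditioning setup so other entry points (like the custom sampling added next) can reuse it. A sketch of the callback helper; `model` is assumed to be the ModelPatcher a checkpoint loader returns:

    import latent_preview

    steps = 20
    x0_output = {}
    callback = latent_preview.prepare_callback(model, steps, x0_output_dict=x0_output)

    # pass `callback` to comfy.sample.sample(...); during sampling it pushes
    # preview images to the progress bar, and afterwards x0_output["x0"]
    # holds the most recent denoised (x0) prediction.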
--- comfy/sample.py | 22 +++++++++++++--------- latent_preview.py | 18 ++++++++++++++++++ nodes.py | 17 +---------------- 3 files changed, 32 insertions(+), 25 deletions(-) diff --git a/comfy/sample.py b/comfy/sample.py index e4730b189ad..fe9f4118dd7 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -70,25 +70,29 @@ def cleanup_additional_models(models): if hasattr(m, 'cleanup'): m.cleanup() -def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None): - device = comfy.model_management.get_torch_device() +def prepare_sampling(model, noise_shape, positive, negative, noise_mask): + device = model.load_device if noise_mask is not None: - noise_mask = prepare_mask(noise_mask, noise.shape, device) + noise_mask = prepare_mask(noise_mask, noise_shape, device) real_model = None models, inference_memory = get_additional_models(positive, negative, model.model_dtype()) - comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise.shape[0] * noise.shape[2] * noise.shape[3]) + inference_memory) + comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise_shape[0] * noise_shape[2] * noise_shape[3]) + inference_memory) real_model = model.model - noise = noise.to(device) - latent_image = latent_image.to(device) + positive_copy = broadcast_cond(positive, noise_shape[0], device) + negative_copy = broadcast_cond(negative, noise_shape[0], device) + return real_model, positive_copy, negative_copy, noise_mask, models + - positive_copy = broadcast_cond(positive, noise.shape[0], device) - negative_copy = broadcast_cond(negative, noise.shape[0], device) +def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None): + real_model, positive_copy, negative_copy, noise_mask, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask) + noise = noise.to(model.load_device) + latent_image = latent_image.to(model.load_device) - sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) + sampler = comfy.samplers.KSampler(real_model, steps=steps, device=model.load_device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed) samples = samples.cpu() diff --git a/latent_preview.py b/latent_preview.py index 87240a58291..740e0860776 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -5,6 +5,7 @@ from comfy.cli_args import args, LatentPreviewMethod from comfy.taesd.taesd import TAESD import folder_paths +import comfy.utils MAX_PREVIEW_RESOLUTION = 512 @@ -74,4 +75,21 @@ def get_previewer(device, latent_format): previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors) return previewer +def prepare_callback(model, steps, x0_output_dict=None): + preview_format = "JPEG" + if 
preview_format not in ["JPEG", "PNG"]: + preview_format = "JPEG" + + previewer = get_previewer(model.load_device, model.model.latent_format) + + pbar = comfy.utils.ProgressBar(steps) + def callback(step, x0, x, total_steps): + if x0_output_dict is not None: + x0_output_dict["x0"] = x0 + + preview_bytes = None + if previewer: + preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) + pbar.update_absolute(step + 1, total_steps, preview_bytes) + return callback diff --git a/nodes.py b/nodes.py index 4abb0d24d74..a847db6fbca 100644 --- a/nodes.py +++ b/nodes.py @@ -1189,11 +1189,8 @@ def set_mask(self, samples, mask): s["noise_mask"] = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])) return (s,) - def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False): - device = comfy.model_management.get_torch_device() latent_image = latent["samples"] - if disable_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: @@ -1204,19 +1201,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, if "noise_mask" in latent: noise_mask = latent["noise_mask"] - preview_format = "JPEG" - if preview_format not in ["JPEG", "PNG"]: - preview_format = "JPEG" - - previewer = latent_preview.get_previewer(device, model.model.latent_format) - - pbar = comfy.utils.ProgressBar(steps) - def callback(step, x0, x, total_steps): - preview_bytes = None - if previewer: - preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) - pbar.update_absolute(step + 1, total_steps, preview_bytes) - + callback = latent_preview.prepare_callback(model, steps) samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed) From 1adcc4c3a2f6f329c1e4e7ac3114f254f9b5f558 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 27 Sep 2023 22:21:18 -0400 Subject: [PATCH 025/420] Add a SamplerCustom Node. This node takes a list of sigmas and a sampler object as input. This lets people easily implement custom schedulers and samplers as nodes. More nodes will be added to it in the future. 
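To make the data flow concrete before the diff: a sketch of how the new pieces compose, using only names introduced in this patch (`sample_custom`, the `ksampler` factory) plus `prepare_noise`, which already exists in `comfy.sample`. Treat it as pseudocode for the wiring rather than a supported entry point; `model` is assumed to be a loaded ModelPatcher and `latent` a standard latent dict.

```python
import comfy.sample
import comfy.samplers
from comfy.k_diffusion import sampling as k_diffusion_sampling

def run_custom_sampling(model, positive, negative, latent, seed=0, cfg=8.0):
    latent_image = latent["samples"]
    # SIGMAS input: any descending noise schedule, e.g. a Karras schedule
    sigmas = k_diffusion_sampling.get_sigmas_karras(
        n=20, sigma_min=0.0291675, sigma_max=14.614642, rho=7.0)
    # SAMPLER input: a sampler object, here a plain k-diffusion sampler
    sampler = comfy.samplers.ksampler("euler")()
    noise = comfy.sample.prepare_noise(latent_image, seed)
    return comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas,
                                      positive, negative, latent_image, seed=seed)
```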
--- comfy/sample.py | 12 ++++ comfy_extras/nodes_custom_sampler.py | 98 ++++++++++++++++++++++++++++ nodes.py | 4 +- 3 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_custom_sampler.py diff --git a/comfy/sample.py b/comfy/sample.py index fe9f4118dd7..322272766f0 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -99,3 +99,15 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative cleanup_additional_models(models) return samples + +def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=None, callback=None, disable_pbar=False, seed=None): + real_model, positive_copy, negative_copy, noise_mask, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask) + noise = noise.to(model.load_device) + latent_image = latent_image.to(model.load_device) + sigmas = sigmas.to(model.load_device) + + samples = comfy.samplers.sample(real_model, noise, positive_copy, negative_copy, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) + samples = samples.cpu() + cleanup_additional_models(models) + return samples + diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py new file mode 100644 index 00000000000..062852629d8 --- /dev/null +++ b/comfy_extras/nodes_custom_sampler.py @@ -0,0 +1,98 @@ +import comfy.samplers +import comfy.sample +from comfy.k_diffusion import sampling as k_diffusion_sampling +import latent_preview + + +class KarrasScheduler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), + "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), + "rho": ("FLOAT", {"default": 7.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, steps, sigma_max, sigma_min, rho): + sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) + return (sigmas, ) + + +class KSamplerSelect: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sampler_name": (comfy.samplers.KSAMPLER_NAMES, ), + } + } + RETURN_TYPES = ("SAMPLER",) + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sampler" + + def get_sampler(self, sampler_name): + sampler = comfy.samplers.ksampler(sampler_name)() + return (sampler, ) + +class SamplerCustom: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "add_noise": (["enable", "disable"], ), + "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "sampler": ("SAMPLER", ), + "sigmas": ("SIGMAS", ), + "latent_image": ("LATENT", ), + } + } + + RETURN_TYPES = ("LATENT","LATENT") + RETURN_NAMES = ("output", "denoised_output") + + FUNCTION = "sample" + + CATEGORY = "_for_testing/custom_sampling" + + def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image): + latent = latent_image + latent_image = latent["samples"] + if add_noise 
== "disable": + noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") + else: + batch_inds = latent["batch_index"] if "batch_index" in latent else None + noise = comfy.sample.prepare_noise(latent_image, noise_seed, batch_inds) + + noise_mask = None + if "noise_mask" in latent: + noise_mask = latent["noise_mask"] + + x0_output = {} + callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output) + + disable_pbar = False + samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise_seed) + + out = latent.copy() + out["samples"] = samples + if "x0" in x0_output: + out_denoised = latent.copy() + out_denoised["samples"] = model.model.process_latent_out(x0_output["x0"].cpu()) + else: + out_denoised = out + return (out, out_denoised) + +NODE_CLASS_MAPPINGS = { + "SamplerCustom": SamplerCustom, + "KarrasScheduler": KarrasScheduler, + "KSamplerSelect": KSamplerSelect, +} diff --git a/nodes.py b/nodes.py index a847db6fbca..1232373be0c 100644 --- a/nodes.py +++ b/nodes.py @@ -1202,9 +1202,10 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, noise_mask = latent["noise_mask"] callback = latent_preview.prepare_callback(model, steps) + disable_pbar = False samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, - force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, seed=seed) + force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) out = latent.copy() out["samples"] = samples return (out, ) @@ -1791,4 +1792,5 @@ def init_custom_nodes(): load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_clip_sdxl.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_canny.py")) load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_freelunch.py")) + load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_custom_sampler.py")) load_custom_nodes() From 1d7dfc07d5e76968c9137c17fca0f7ad77a7b9d8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 27 Sep 2023 22:32:42 -0400 Subject: [PATCH 026/420] Make add_noise in SamplerCustom a boolean. 
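With `add_noise` now a plain boolean, a caller replicates the node's branch as in the sketch below, which mirrors the node code above. A side benefit is that the `BOOLEAN` input type should render as a toggle in the frontend instead of an enable/disable combo.

```python
import torch
import comfy.sample

def make_noise(latent, add_noise: bool, noise_seed: int):
    latent_image = latent["samples"]
    if not add_noise:
        # "no noise" path, e.g. when resuming from a partially denoised latent
        return torch.zeros(latent_image.size(), dtype=latent_image.dtype,
                           layout=latent_image.layout, device="cpu")
    # otherwise generate seeded noise, honoring any batch_index bookkeeping
    batch_inds = latent.get("batch_index")
    return comfy.sample.prepare_noise(latent_image, noise_seed, batch_inds)
```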
--- comfy_extras/nodes_custom_sampler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 062852629d8..842a9de4f60 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -2,7 +2,7 @@ import comfy.sample from comfy.k_diffusion import sampling as k_diffusion_sampling import latent_preview - +import torch class KarrasScheduler: @classmethod @@ -45,7 +45,7 @@ class SamplerCustom: def INPUT_TYPES(s): return {"required": {"model": ("MODEL",), - "add_noise": (["enable", "disable"], ), + "add_noise": ("BOOLEAN", {"default": True}), "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), "positive": ("CONDITIONING", ), @@ -66,7 +66,7 @@ def INPUT_TYPES(s): def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image): latent = latent_image latent_image = latent["samples"] - if add_noise == "disable": + if not add_noise: noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu") else: batch_inds = latent["batch_index"] if "batch_index" in latent else None From d234ca558a7777b607a4f81aeb9e8703ef020977 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Sep 2023 00:17:03 -0400 Subject: [PATCH 027/420] Add missing samplers to KSamplerSelect. --- comfy/samplers.py | 20 ++++++++++++-------- comfy_extras/nodes_custom_sampler.py | 4 ++-- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 7668d7913c7..a7c240f407c 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -711,6 +711,17 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps): print("error invalid scheduler", self.scheduler) return sigmas +def sampler_class(name): + if name == "uni_pc": + sampler = UNIPC + elif name == "uni_pc_bh2": + sampler = UNIPCBH2 + elif name == "ddim": + sampler = DDIM + else: + sampler = ksampler(name) + return sampler + class KSampler: SCHEDULERS = SCHEDULER_NAMES SAMPLERS = SAMPLER_NAMES @@ -769,13 +780,6 @@ def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=N else: return torch.zeros_like(noise) - if self.sampler == "uni_pc": - sampler = UNIPC - elif self.sampler == "uni_pc_bh2": - sampler = UNIPCBH2 - elif self.sampler == "ddim": - sampler = DDIM - else: - sampler = ksampler(self.sampler) + sampler = sampler_class(self.sampler) return sample(self.model, noise, positive, negative, cfg, self.device, sampler(), sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 842a9de4f60..1c587dbd80a 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -28,7 +28,7 @@ class KSamplerSelect: @classmethod def INPUT_TYPES(s): return {"required": - {"sampler_name": (comfy.samplers.KSAMPLER_NAMES, ), + {"sampler_name": (comfy.samplers.SAMPLER_NAMES, ), } } RETURN_TYPES = ("SAMPLER",) @@ -37,7 +37,7 @@ def INPUT_TYPES(s): FUNCTION = "get_sampler" def get_sampler(self, sampler_name): - sampler = comfy.samplers.ksampler(sampler_name)() + sampler = comfy.samplers.sampler_class(sampler_name)() return (sampler, ) class SamplerCustom: From 2bf051fda87cfa94e5c99bbd88fc7f1434e9e1a2 Mon 
Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Sep 2023 00:30:45 -0400 Subject: [PATCH 028/420] Add a basic node to generate sigmas from scheduler. --- comfy_extras/nodes_custom_sampler.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 1c587dbd80a..aafde8f32f8 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -4,6 +4,26 @@ import latent_preview import torch + +class BasicScheduler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "scheduler": (comfy.samplers.SCHEDULER_NAMES, ), + "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, model, scheduler, steps): + sigmas = comfy.samplers.calculate_sigmas_scheduler(model.model, scheduler, steps).cpu() + return (sigmas, ) + + class KarrasScheduler: @classmethod def INPUT_TYPES(s): @@ -95,4 +115,5 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, "SamplerCustom": SamplerCustom, "KarrasScheduler": KarrasScheduler, "KSamplerSelect": KSamplerSelect, + "BasicScheduler": BasicScheduler, } From 76e0f8fc8fe330b9568fab4b4a8049a62d141165 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Sep 2023 00:40:09 -0400 Subject: [PATCH 029/420] Add function to split sigmas. --- comfy_extras/nodes_custom_sampler.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index aafde8f32f8..efe03ad24cf 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -43,6 +43,23 @@ def get_sigmas(self, steps, sigma_max, sigma_min, rho): sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) return (sigmas, ) +class SplitSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sigmas": ("SIGMAS", ), + "step": ("INT", {"default": 0, "min": 0, "max": 10000}), + } + } + RETURN_TYPES = ("SIGMAS","SIGMAS") + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, sigmas, step): + sigmas1 = sigmas[:step + 1] + sigmas2 = sigmas[step + 1:] + return (sigmas1, sigmas2) class KSamplerSelect: @classmethod @@ -116,4 +133,5 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, "KarrasScheduler": KarrasScheduler, "KSamplerSelect": KSamplerSelect, "BasicScheduler": BasicScheduler, + "SplitSigmas": SplitSigmas, } From 71713888c4d2af38c2f25f39226933081f5f70d7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Sep 2023 00:54:57 -0400 Subject: [PATCH 030/420] Print missing VAE keys. 
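Before the VAE fix below, a note on BasicScheduler and SplitSigmas above: for two-stage sampling the halves must share the boundary sigma, i.e. the second stage has to start at exactly the sigma where the first stage stopped. A sketch of the intended split (using the corrected slicing that lands a couple of patches further down):

```python
from comfy.k_diffusion import sampling as k_diffusion_sampling

sigmas = k_diffusion_sampling.get_sigmas_karras(
    n=20, sigma_min=0.0291675, sigma_max=14.614642, rho=7.0)

step = 12
high_sigmas = sigmas[:step + 1]  # first 12 steps, ending at sigmas[step]
low_sigmas = sigmas[step:]       # remaining steps, starting at sigmas[step]
assert high_sigmas[-1] == low_sigmas[0]  # boundary sigma shared by both halves
```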
--- comfy/sd.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy/sd.py b/comfy/sd.py index 9bdb2ad64ff..2f1b2e96472 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -152,7 +152,9 @@ def __init__(self, ckpt_path=None, device=None, config=None): sd = comfy.utils.load_torch_file(ckpt_path) if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format sd = diffusers_convert.convert_vae_state_dict(sd) - self.first_stage_model.load_state_dict(sd, strict=False) + m, u = self.first_stage_model.load_state_dict(sd, strict=False) + if len(m) > 0: + print("Missing VAE keys", m) if device is None: device = model_management.vae_device() From 26b73728053a786c429356fc02a7c98868d2ba02 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Sep 2023 01:11:22 -0400 Subject: [PATCH 031/420] Fix SplitSigmas. --- comfy_extras/nodes_custom_sampler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index efe03ad24cf..5e5ef61b5d3 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -58,7 +58,7 @@ def INPUT_TYPES(s): def get_sigmas(self, sigmas, step): sigmas1 = sigmas[:step + 1] - sigmas2 = sigmas[step + 1:] + sigmas2 = sigmas[step:] return (sigmas1, sigmas2) class KSamplerSelect: From 66756de1002c23ec4005504232e3f8e5096c964b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Sep 2023 21:56:23 -0400 Subject: [PATCH 032/420] Add SamplerDPMPP_2M_SDE node. --- comfy/samplers.py | 4 ++-- comfy_extras/nodes_custom_sampler.py | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index a7c240f407c..e43f7a6fe74 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -598,7 +598,7 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm"] -def ksampler(sampler_name): +def ksampler(sampler_name, extra_options={}): class KSAMPLER(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): extra_args["denoise_mask"] = denoise_mask @@ -627,7 +627,7 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N elif sampler_name == "dpm_adaptive": samples = k_diffusion_sampling.sample_dpm_adaptive(model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar) else: - samples = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar) + samples = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **extra_options) return samples return KSAMPLER diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 5e5ef61b5d3..b667afe4f1b 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -77,6 +77,30 @@ def get_sampler(self, sampler_name): sampler = comfy.samplers.sampler_class(sampler_name)() return (sampler, ) +class SamplerDPMPP_2M_SDE: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"solver_type": (['midpoint', 'heun'], ), + "eta": ("FLOAT", 
{"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
+ "noise_device": (['gpu', 'cpu'], ),
+ }
+ }
+ RETURN_TYPES = ("SAMPLER",)
+ CATEGORY = "_for_testing/custom_sampling"
+
+ FUNCTION = "get_sampler"
+
+ def get_sampler(self, solver_type, eta, s_noise, noise_device):
+ if noise_device == 'cpu':
+ sampler_name = "dpmpp_2m_sde"
+ else:
+ sampler_name = "dpmpp_2m_sde_gpu"
+ sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type})()
+ return (sampler, )
+
+
 class SamplerCustom:
 @classmethod
 def INPUT_TYPES(s):
@@ -132,6 +156,7 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler,
 "SamplerCustom": SamplerCustom,
 "KarrasScheduler": KarrasScheduler,
 "KSamplerSelect": KSamplerSelect,
+ "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE,
 "BasicScheduler": BasicScheduler,
 "SplitSigmas": SplitSigmas,
 }

From 1c8ae9dbb249ed5326d61d16b4e6b5807c09c0e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com>
Date: Fri, 29 Sep 2023 05:01:19 +0300
Subject: [PATCH 033/420] Allow GrowMask node to work with batches (for AnimateDiff) (#1623)

* Allow mask batches

This allows the LatentCompositeMasked node to work with AnimateDiff.
I tried to keep the old functionality too; I'm unsure if it's correct, but both a single mask and a batch of masks seem to work with this change.

* Update nodes_mask.py
---
 comfy_extras/nodes_mask.py | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py
index af7cb07bfb3..cdf762ffd05 100644
--- a/comfy_extras/nodes_mask.py
+++ b/comfy_extras/nodes_mask.py
@@ -331,15 +331,14 @@ def expand_mask(self, mask, expand, tapered_corners):
 out = []
 for m in mask:
 output = m.numpy()
- while expand < 0:
- output = scipy.ndimage.grey_erosion(output, footprint=kernel)
- expand += 1
- while expand > 0:
- output = scipy.ndimage.grey_dilation(output, footprint=kernel)
- expand -= 1
+ for _ in range(abs(expand)):
+ if expand < 0:
+ output = scipy.ndimage.grey_erosion(output, footprint=kernel)
+ else:
+ output = scipy.ndimage.grey_dilation(output, footprint=kernel)
 output = torch.from_numpy(output)
 out.append(output)
- return (torch.cat(out, dim=0),)
+ return (torch.stack(out, dim=0),)

From 0f17993d0587254fcff06bf689dfe38300ea8834 Mon Sep 17 00:00:00 2001
From: badayvedat
Date: Fri, 29 Sep 2023 06:09:59 +0300
Subject: [PATCH 034/420] fix: typo in extra sampler
---
 comfy/extra_samplers/uni_pc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py
index 7eaf6ff62b6..7e88bb9fa1b 100644
--- a/comfy/extra_samplers/uni_pc.py
+++ b/comfy/extra_samplers/uni_pc.py
@@ -688,7 +688,7 @@ def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order,
 x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
 else:
 x_t_ = (
- expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dimss) * x
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
 - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
 )
 if x_t is None:

From 213976f8c3ea3f45f0c692dd8aac2fd9fea433e3 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 29 Sep 2023 09:05:30 -0400
Subject: [PATCH 035/420] Add ExponentialScheduler and PolyexponentialScheduler nodes.
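The `extra_options` dict added to `ksampler()` two patches back is what makes parameterized sampler nodes like SamplerDPMPP_2M_SDE possible: the dict is forwarded as keyword arguments (`**extra_options`) to the underlying k-diffusion sampling function. A minimal sketch of building such a sampler object directly:

```python
import comfy.samplers

# Equivalent to the CPU path of the SamplerDPMPP_2M_SDE node above; the dict
# entries become keyword arguments of k-diffusion's sample_dpmpp_2m_sde().
sampler = comfy.samplers.ksampler(
    "dpmpp_2m_sde", {"eta": 1.0, "s_noise": 1.0, "solver_type": "midpoint"})()
```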
--- comfy_extras/nodes_custom_sampler.py | 39 ++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index b667afe4f1b..a1dc9784801 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -43,6 +43,43 @@ def get_sigmas(self, steps, sigma_max, sigma_min, rho): sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) return (sigmas, ) +class ExponentialScheduler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), + "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, steps, sigma_max, sigma_min): + sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max) + return (sigmas, ) + +class PolyexponentialScheduler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "sigma_max": ("FLOAT", {"default": 14.614642, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), + "sigma_min": ("FLOAT", {"default": 0.0291675, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), + "rho": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, steps, sigma_max, sigma_min, rho): + sigmas = k_diffusion_sampling.get_sigmas_polyexponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) + return (sigmas, ) + class SplitSigmas: @classmethod def INPUT_TYPES(s): @@ -155,6 +192,8 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, NODE_CLASS_MAPPINGS = { "SamplerCustom": SamplerCustom, "KarrasScheduler": KarrasScheduler, + "ExponentialScheduler": ExponentialScheduler, + "PolyexponentialScheduler": PolyexponentialScheduler, "KSamplerSelect": KSamplerSelect, "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, "BasicScheduler": BasicScheduler, From 8ab49dc0a4768f17c5a46627fd5601a484549a5b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 30 Sep 2023 01:31:52 -0400 Subject: [PATCH 036/420] DPMPP_SDE node. 
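For intuition about how the schedules added above differ, a quick comparison sketch using the same k-diffusion helpers the nodes call (the defaults mirror the node INPUT_TYPES). With rho=1.0 the polyexponential schedule should reduce to the plain exponential one, since both are then linear in log-sigma:

```python
from comfy.k_diffusion import sampling as k_diffusion_sampling

n, smin, smax = 10, 0.0291675, 14.614642
schedules = {
    "karras": k_diffusion_sampling.get_sigmas_karras(n=n, sigma_min=smin, sigma_max=smax, rho=7.0),
    "exponential": k_diffusion_sampling.get_sigmas_exponential(n=n, sigma_min=smin, sigma_max=smax),
    "polyexponential": k_diffusion_sampling.get_sigmas_polyexponential(n=n, sigma_min=smin, sigma_max=smax, rho=1.0),
}
for name, s in schedules.items():
    print(f"{name:16s}", [round(float(v), 3) for v in s])
```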
--- comfy_extras/nodes_custom_sampler.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index a1dc9784801..d2cec7f0944 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -138,6 +138,29 @@ def get_sampler(self, solver_type, eta, s_noise, noise_device): return (sampler, ) +class SamplerDPMPP_SDE: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), + "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), + "r": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 100.0, "step":0.01, "round": False}), + "noise_device": (['gpu', 'cpu'], ), + } + } + RETURN_TYPES = ("SAMPLER",) + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sampler" + + def get_sampler(self, eta, s_noise, r, noise_device): + if noise_device == 'cpu': + sampler_name = "dpmpp_sde" + else: + sampler_name = "dpmpp_sde_gpu" + sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r})() + return (sampler, ) + class SamplerCustom: @classmethod def INPUT_TYPES(s): @@ -196,6 +219,7 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, "PolyexponentialScheduler": PolyexponentialScheduler, "KSamplerSelect": KSamplerSelect, "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, + "SamplerDPMPP_SDE": SamplerDPMPP_SDE, "BasicScheduler": BasicScheduler, "SplitSigmas": SplitSigmas, } From 2ef459b1d4d627929c84d11e5e0cbe3ded9c9f48 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 1 Oct 2023 03:48:07 -0400 Subject: [PATCH 037/420] Add VPScheduler node --- comfy_extras/nodes_custom_sampler.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index d2cec7f0944..42a1fd6ba03 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -80,6 +80,25 @@ def get_sigmas(self, steps, sigma_max, sigma_min, rho): sigmas = k_diffusion_sampling.get_sigmas_polyexponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) return (sigmas, ) +class VPScheduler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "beta_d": ("FLOAT", {"default": 19.9, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), #TODO: fix default values + "beta_min": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 1000.0, "step":0.01, "round": False}), + "eps_s": ("FLOAT", {"default": 0.001, "min": 0.0, "max": 1.0, "step":0.0001, "round": False}), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "_for_testing/custom_sampling" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, steps, beta_d, beta_min, eps_s): + sigmas = k_diffusion_sampling.get_sigmas_vp(n=steps, beta_d=beta_d, beta_min=beta_min, eps_s=eps_s) + return (sigmas, ) + class SplitSigmas: @classmethod def INPUT_TYPES(s): @@ -217,6 +236,7 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, "KarrasScheduler": KarrasScheduler, "ExponentialScheduler": ExponentialScheduler, "PolyexponentialScheduler": PolyexponentialScheduler, + "VPScheduler": VPScheduler, "KSamplerSelect": KSamplerSelect, "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, "SamplerDPMPP_SDE": SamplerDPMPP_SDE, From ec454c771b8c2007fbf08602a3205bacd96272a6 Mon Sep 17 00:00:00 
2001 From: comfyanonymous Date: Mon, 2 Oct 2023 17:26:59 -0400 Subject: [PATCH 038/420] Refactor with code from comment of #1588 --- nodes.py | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/nodes.py b/nodes.py index 1232373be0c..919aac89e16 100644 --- a/nodes.py +++ b/nodes.py @@ -1781,16 +1781,23 @@ def load_custom_nodes(): print() def init_custom_nodes(): - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_latent.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_hypernetwork.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_upscale_model.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_post_processing.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_mask.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_rebatch.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_model_merging.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_tomesd.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_clip_sdxl.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_canny.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_freelunch.py")) - load_custom_node(os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras"), "nodes_custom_sampler.py")) + extras_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras") + extras_files = [ + "nodes_latent.py", + "nodes_hypernetwork.py", + "nodes_upscale_model.py", + "nodes_post_processing.py", + "nodes_mask.py", + "nodes_rebatch.py", + "nodes_model_merging.py", + "nodes_tomesd.py", + "nodes_clip_sdxl.py", + "nodes_canny.py", + "nodes_freelunch.py", + "nodes_custom_sampler.py" + ] + + for node_file in extras_files: + load_custom_node(os.path.join(extras_dir, node_file)) + load_custom_nodes() From fe1e2dbe9000ad3365a71986c726259c1353d304 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 3 Oct 2023 00:01:49 -0400 Subject: [PATCH 039/420] pytorch nightly is now ROCm 5.7 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d83b4bdac7f..97677921add 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ ComfyUI ======= -A powerful and modular stable diffusion GUI and backend. +The most powerful and modular stable diffusion GUI and backend. 
----------- ![ComfyUI Screenshot](comfyui_screenshot.png) @@ -94,8 +94,8 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2``` -This is the command to install the nightly with ROCm 5.6 that supports the 7000 series and might have some performance improvements: -```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.6``` +This is the command to install the nightly with ROCm 5.7 that supports the 7000 series and might have some performance improvements: +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.7``` ### NVIDIA From 1f38de1fb3c9e1d8bed81fef7901d5f37561d937 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" Date: Tue, 3 Oct 2023 18:30:38 +0900 Subject: [PATCH 040/420] If an error occurs while retrieving object_info, only the node that encountered the error should be handled as an exception, while the information for the other nodes should continue to be processed normally. --- server.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/server.py b/server.py index b2e16716ba8..63f337a873f 100644 --- a/server.py +++ b/server.py @@ -413,7 +413,11 @@ def node_info(node_class): async def get_object_info(request): out = {} for x in nodes.NODE_CLASS_MAPPINGS: - out[x] = node_info(x) + try: + out[x] = node_info(x) + except Exception as e: + print(f"[ERROR] An error occurred while retrieving information for the '{x}' node.", file=sys.stderr) + traceback.print_exc() return web.json_response(out) @routes.get("/object_info/{node_class}") From 6fc73143934028771466f76818ebef3219bb1793 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 3 Oct 2023 20:19:12 +0100 Subject: [PATCH 041/420] support refreshing primitive combos no longer uses combo list as type name --- web/extensions/core/widgetInputs.js | 126 +++++++++++++++++++--------- web/scripts/app.js | 53 +++++++++++- 2 files changed, 136 insertions(+), 43 deletions(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 606605f0a96..98d52b02c97 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -4,6 +4,11 @@ import { app } from "../../scripts/app.js"; const CONVERTED_TYPE = "converted-widget"; const VALID_TYPES = ["STRING", "combo", "number", "BOOLEAN"]; +function getConfig(widgetName) { + const { nodeData } = this.constructor; + return nodeData?.input?.required[widgetName] ?? 
nodeData?.input?.optional?.[widgetName]; +} + function isConvertableWidget(widget, config) { return (VALID_TYPES.includes(widget.type) || VALID_TYPES.includes(config[0])) && !widget.options?.forceInput; } @@ -55,12 +60,12 @@ function showWidget(widget) { function convertToInput(node, widget, config) { hideWidget(node, widget); - const { linkType } = getWidgetType(config); + const { linkType } = getWidgetType(config, `${node.comfyClass}|${widget.name}`); // Add input and store widget config for creating on primitive node const sz = node.size; node.addInput(widget.name, linkType, { - widget: { name: widget.name, config }, + widget: { name: widget.name, getConfig: () => config }, }); for (const widget of node.widgets) { @@ -84,13 +89,13 @@ function convertToWidget(node, widget) { node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])]); } -function getWidgetType(config) { +function getWidgetType(config, comboType) { // Special handling for COMBO so we restrict links based on the entries let type = config[0]; let linkType = type; if (type instanceof Array) { type = "COMBO"; - linkType = linkType.join(","); + linkType = comboType; } return { type, linkType }; } @@ -116,7 +121,7 @@ app.registerExtension({ callback: () => convertToWidget(this, w), }); } else { - const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}]; + const config = getConfig.call(this, w.name) ?? [w.type, w.options || {}]; if (isConvertableWidget(w, config)) { toInput.push({ content: `Convert ${w.name} to input`, @@ -137,34 +142,56 @@ app.registerExtension({ return r; }; - const origOnNodeCreated = nodeType.prototype.onNodeCreated + nodeType.prototype.onGraphConfigured = function () { + if (!this.inputs) return; + + for (const input of this.inputs) { + if (input.widget) { + // Cleanup old widget config + delete input.widget.config; + + if (!input.widget.getConfig) { + input.widget.getConfig = getConfig.bind(this, input.widget.name); + } + + const config = input.widget.getConfig(); + if (config[1]?.forceInput) continue; + + const w = this.widgets.find((w) => w.name === input.widget.name); + if (w) { + hideWidget(this, w); + } else { + convertToWidget(this, input); + } + } + } + }; + + const origOnNodeCreated = nodeType.prototype.onNodeCreated; nodeType.prototype.onNodeCreated = function () { const r = origOnNodeCreated ? origOnNodeCreated.apply(this) : undefined; - if (this.widgets) { + + // When node is created, convert any force/default inputs + if (!app.configuringGraph && this.widgets) { for (const w of this.widgets) { if (w?.options?.forceInput || w?.options?.defaultInput) { - const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}]; + const config = getConfig.call(this, w.name) ?? [w.type, w.options || {}]; convertToInput(this, w, config); } } } + return r; - } + }; - // On initial configure of nodes hide all converted widgets const origOnConfigure = nodeType.prototype.onConfigure; nodeType.prototype.onConfigure = function () { const r = origOnConfigure ? 
origOnConfigure.apply(this, arguments) : undefined; - - if (this.inputs) { + if (!app.configuringGraph && this.inputs) { + // On copy + paste of nodes, ensure that widget configs are set up for (const input of this.inputs) { - if (input.widget && !input.widget.config[1]?.forceInput) { - const w = this.widgets.find((w) => w.name === input.widget.name); - if (w) { - hideWidget(this, w); - } else { - convertToWidget(this, input) - } + if (input.widget && !input.widget.getConfig) { + input.widget.getConfig = getConfig.bind(this, input.widget.name); } } } @@ -190,7 +217,7 @@ app.registerExtension({ const input = this.inputs[slot]; if (!input.widget || !input[ignoreDblClick]) { // Not a widget input or already handled input - if (!(input.type in ComfyWidgets) && !(input.widget.config?.[0] instanceof Array)) { + if (!(input.type in ComfyWidgets) && !(input.widget.getConfig?.()?.[0] instanceof Array)) { return r; //also Not a ComfyWidgets input or combo (do nothing) } } @@ -262,17 +289,38 @@ app.registerExtension({ } } + refreshComboInNode() { + const widget = this.widgets?.[0]; + if (widget?.type === "combo") { + widget.options.values = this.outputs[0].widget.getConfig()[0]; + + if (!widget.options.values.includes(widget.value)) { + widget.value = widget.options.values[0]; + widget.callback(widget.value); + } + } + } + + onAfterGraphConfigured() { + if (this.outputs[0].links?.length && !this.widgets?.length) { + this.#onFirstConnection(); + + // Populate widget values from config data + for (let i = 0; i < this.widgets_values.length; i++) { + this.widgets[i].value = this.widgets_values[i]; + } + } + } + onConnectionsChange(_, index, connected) { + if (app.configuringGraph) { + // Dont run while the graph is still setting up + return; + } + if (connected) { - if (this.outputs[0].links?.length) { - if (!this.widgets?.length) { - this.#onFirstConnection(); - } - if (!this.widgets?.length && this.outputs[0].widget) { - // On first load it often cant recreate the widget as the other node doesnt exist yet - // Manually recreate it from the output info - this.#createWidget(this.outputs[0].widget.config); - } + if (this.outputs[0].links?.length && !this.widgets?.length) { + this.#onFirstConnection(); } } else if (!this.outputs[0].links?.length) { this.#onLastDisconnect(); @@ -304,23 +352,21 @@ app.registerExtension({ const input = theirNode.inputs[link.target_slot]; if (!input) return; - - var _widget; + let widget; if (!input.widget) { if (!(input.type in ComfyWidgets)) return; - _widget = { "name": input.name, "config": [input.type, {}] }//fake widget + widget = { name: input.name, getConfig: () => [input.type, {}] }; //fake widget } else { - _widget = input.widget; + widget = input.widget; } - const widget = _widget; - const { type, linkType } = getWidgetType(widget.config); + const { type, linkType } = getWidgetType(widget.getConfig(), `${theirNode.comfyClass}|${widget.name}`); // Update our output to restrict to the widget type this.outputs[0].type = linkType; this.outputs[0].name = type; this.outputs[0].widget = widget; - this.#createWidget(widget.config, theirNode, widget.name); + this.#createWidget(widget.getConfig(), theirNode, widget.name); } #createWidget(inputData, node, widgetName) { @@ -334,7 +380,7 @@ app.registerExtension({ if (type in ComfyWidgets) { widget = (ComfyWidgets[type](this, "value", inputData, app) || {}).widget; } else { - widget = this.addWidget(type, "value", null, () => { }, {}); + widget = this.addWidget(type, "value", null, () => {}, {}); } if (node?.widgets && widget) { 
@@ -376,8 +422,8 @@ app.registerExtension({ #isValidConnection(input) { // Only allow connections where the configs match - const config1 = this.outputs[0].widget.config; - const config2 = input.widget.config; + const config1 = this.outputs[0].widget.getConfig(); + const config2 = input.widget.getConfig(); if (config1[0] instanceof Array) { // These checks shouldnt actually be necessary as the types should match @@ -395,7 +441,7 @@ app.registerExtension({ } for (const k in config1[1]) { - if (k !== "default" && k !== 'forceInput') { + if (k !== "default" && k !== "forceInput") { if (config1[1][k] !== config2[1][k]) { return false; } diff --git a/web/scripts/app.js b/web/scripts/app.js index b41c12b8669..3c29a684ad4 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1114,6 +1114,40 @@ export class ComfyApp { }); } + #addConfigureHandler() { + const app = this; + const configure = LGraph.prototype.configure; + // Flag that the graph is configuring to prevent nodes from running checks while its still loading + LGraph.prototype.configure = function () { + app.configuringGraph = true; + try { + return configure.apply(this, arguments); + } finally { + app.configuringGraph = false; + } + }; + } + + #addAfterConfigureHandler() { + const app = this; + const onConfigure = app.graph.onConfigure; + app.graph.onConfigure = function () { + // Fire callbacks before the onConfigure, this is used by widget inputs to setup the config + for (const node of app.graph._nodes) { + node.onGraphConfigured?.(); + } + + const r = onConfigure?.apply(this, arguments); + + // Fire after onConfigure, used by primitves to generate widget using input nodes config + for (const node of app.graph._nodes) { + node.onAfterGraphConfigured?.(); + } + + return r; + }; + } + /** * Loads all extensions from the API into the window in parallel */ @@ -1147,8 +1181,12 @@ export class ComfyApp { this.#addProcessMouseHandler(); this.#addProcessKeyHandler(); + this.#addConfigureHandler(); this.graph = new LGraph(); + + this.#addAfterConfigureHandler(); + const canvas = (this.canvas = new LGraphCanvas(canvasEl, this.graph)); this.ctx = canvasEl.getContext("2d"); @@ -1285,6 +1323,7 @@ export class ComfyApp { { title: nodeData.display_name || nodeData.name, comfyClass: nodeData.name, + nodeData } ); node.prototype.comfyClass = nodeData.name; @@ -1670,13 +1709,21 @@ export class ComfyApp { async refreshComboInNodes() { const defs = await api.getNodeDefs(); + for(const nodeId in LiteGraph.registered_node_types) { + const node = LiteGraph.registered_node_types[nodeId]; + const nodeDef = defs[nodeId]; + if(!nodeDef) continue; + + node.nodeData = nodeDef; + } + for(let nodeNum in this.graph._nodes) { const node = this.graph._nodes[nodeNum]; - const def = defs[node.type]; - // HOTFIX: The current patch is designed to prevent the rest of the code from breaking due to primitive nodes, - // and additional work is needed to consider the primitive logic in the refresh logic. 
+ // Allow primitive nodes to handle refresh + node.refreshComboInNode?.(defs); + if(!def) continue; From 9bfec2bdbf0b0d778087a9b32f79e57e2d15b913 Mon Sep 17 00:00:00 2001 From: City <125218114+city96@users.noreply.github.com> Date: Wed, 4 Oct 2023 15:40:59 +0200 Subject: [PATCH 042/420] Fix quality loss due to low precision --- comfy/sd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 2f1b2e96472..f186273eaa6 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -183,7 +183,7 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): steps += pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap) pbar = comfy.utils.ProgressBar(steps) - encode_fn = lambda a: self.first_stage_model.encode(2. * a.to(self.vae_dtype).to(self.device) - 1.).sample().float() + encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).sample().float() samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) @@ -202,7 +202,7 @@ def decode(self, samples_in): pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu") for x in range(0, samples_in.shape[0], batch_number): samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device) - pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples) + 1.0) / 2.0, min=0.0, max=1.0).cpu().float() + pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).cpu().float() + 1.0) / 2.0, min=0.0, max=1.0) except model_management.OOM_EXCEPTION as e: print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.") pixel_samples = self.decode_tiled_(samples_in) From d06cd2805d86d7a9ed7485b6a0c7e113cff27d8e Mon Sep 17 00:00:00 2001 From: MoonRide303 Date: Fri, 22 Sep 2023 23:03:22 +0200 Subject: [PATCH 043/420] Added support for Porter-Duff image compositing --- comfy_extras/nodes_compositing.py | 239 ++++++++++++++++++++++++++++++ nodes.py | 28 ++++ 2 files changed, 267 insertions(+) create mode 100644 comfy_extras/nodes_compositing.py diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py new file mode 100644 index 00000000000..c4c58b64ed1 --- /dev/null +++ b/comfy_extras/nodes_compositing.py @@ -0,0 +1,239 @@ +import numpy as np +import torch +import comfy.utils +from enum import Enum + + +class PorterDuffMode(Enum): + ADD = 0 + CLEAR = 1 + DARKEN = 2 + DST = 3 + DST_ATOP = 4 + DST_IN = 5 + DST_OUT = 6 + DST_OVER = 7 + LIGHTEN = 8 + MULTIPLY = 9 + OVERLAY = 10 + SCREEN = 11 + SRC = 12 + SRC_ATOP = 13 + SRC_IN = 14 + SRC_OUT = 15 + SRC_OVER = 16 + XOR = 17 + + +def porter_duff_composite(src_image: torch.Tensor, src_alpha: torch.Tensor, dst_image: torch.Tensor, dst_alpha: torch.Tensor, mode: PorterDuffMode): + if mode == PorterDuffMode.ADD: + out_alpha = torch.clamp(src_alpha + dst_alpha, 0, 1) + out_image = torch.clamp(src_image + dst_image, 0, 1) + elif mode == PorterDuffMode.CLEAR: + out_alpha = torch.zeros_like(dst_alpha) + 
out_image = torch.zeros_like(dst_image) + elif mode == PorterDuffMode.DARKEN: + out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha + out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.min(src_image, dst_image) + elif mode == PorterDuffMode.DST: + out_alpha = dst_alpha + out_image = dst_image + elif mode == PorterDuffMode.DST_ATOP: + out_alpha = src_alpha + out_image = src_alpha * dst_image + (1 - dst_alpha) * src_image + elif mode == PorterDuffMode.DST_IN: + out_alpha = src_alpha * dst_alpha + out_image = dst_image * src_alpha + elif mode == PorterDuffMode.DST_OUT: + out_alpha = (1 - src_alpha) * dst_alpha + out_image = (1 - src_alpha) * dst_image + elif mode == PorterDuffMode.DST_OVER: + out_alpha = dst_alpha + (1 - dst_alpha) * src_alpha + out_image = dst_image + (1 - dst_alpha) * src_image + elif mode == PorterDuffMode.LIGHTEN: + out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha + out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + torch.max(src_image, dst_image) + elif mode == PorterDuffMode.MULTIPLY: + out_alpha = src_alpha * dst_alpha + out_image = src_image * dst_image + elif mode == PorterDuffMode.OVERLAY: + out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha + out_image = torch.where(2 * dst_image < dst_alpha, 2 * src_image * dst_image, + src_alpha * dst_alpha - 2 * (dst_alpha - src_image) * (src_alpha - dst_image)) + elif mode == PorterDuffMode.SCREEN: + out_alpha = src_alpha + dst_alpha - src_alpha * dst_alpha + out_image = src_image + dst_image - src_image * dst_image + elif mode == PorterDuffMode.SRC: + out_alpha = src_alpha + out_image = src_image + elif mode == PorterDuffMode.SRC_ATOP: + out_alpha = dst_alpha + out_image = dst_alpha * src_image + (1 - src_alpha) * dst_image + elif mode == PorterDuffMode.SRC_IN: + out_alpha = src_alpha * dst_alpha + out_image = src_image * dst_alpha + elif mode == PorterDuffMode.SRC_OUT: + out_alpha = (1 - dst_alpha) * src_alpha + out_image = (1 - dst_alpha) * src_image + elif mode == PorterDuffMode.SRC_OVER: + out_alpha = src_alpha + (1 - src_alpha) * dst_alpha + out_image = src_image + (1 - src_alpha) * dst_image + elif mode == PorterDuffMode.XOR: + out_alpha = (1 - dst_alpha) * src_alpha + (1 - src_alpha) * dst_alpha + out_image = (1 - dst_alpha) * src_image + (1 - src_alpha) * dst_image + else: + out_alpha = None + out_image = None + return out_image, out_alpha + + +class PorterDuffImageComposite: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "source": ("IMAGE",), + "source_alpha": ("ALPHA",), + "destination": ("IMAGE",), + "destination_alpha": ("ALPHA",), + "mode": ([mode.name for mode in PorterDuffMode], {"default": PorterDuffMode.DST.name}), + }, + } + + RETURN_TYPES = ("IMAGE", "ALPHA") + FUNCTION = "composite" + CATEGORY = "compositing" + + def composite(self, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode): + batch_size = min(len(source), len(source_alpha), len(destination), len(destination_alpha)) + out_images = [] + out_alphas = [] + + for i in range(batch_size): + src_image = source[i] + dst_image = destination[i] + + src_alpha = source_alpha[i].unsqueeze(2) + dst_alpha = destination_alpha[i].unsqueeze(2) + + if dst_alpha.shape != dst_image.shape: + upscale_input = dst_alpha[None,:,:,:].permute(0, 3, 1, 2) + upscale_output = comfy.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center') + dst_alpha = 
upscale_output.permute(0, 2, 3, 1).squeeze(0) + if src_image.shape != dst_image.shape: + upscale_input = src_image[None,:,:,:].permute(0, 3, 1, 2) + upscale_output = comfy.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center') + src_image = upscale_output.permute(0, 2, 3, 1).squeeze(0) + if src_alpha.shape != dst_alpha.shape: + upscale_input = src_alpha[None,:,:,:].permute(0, 3, 1, 2) + upscale_output = comfy.utils.common_upscale(upscale_input, dst_alpha.shape[1], dst_alpha.shape[0], upscale_method='bicubic', crop='center') + src_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0) + + out_image, out_alpha = porter_duff_composite(src_image, src_alpha, dst_image, dst_alpha, PorterDuffMode[mode]) + + out_images.append(out_image) + out_alphas.append(out_alpha.squeeze(2)) + + result = (torch.stack(out_images), torch.stack(out_alphas)) + return result + + +class SplitImageWithAlpha: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + } + } + + CATEGORY = "compositing" + RETURN_TYPES = ("IMAGE", "ALPHA") + FUNCTION = "split_image_with_alpha" + + def split_image_with_alpha(self, image: torch.Tensor): + out_images = [i[:,:,:3] for i in image] + out_alphas = [i[:,:,3] for i in image] + result = (torch.stack(out_images), torch.stack(out_alphas)) + return result + + +class JoinImageWithAlpha: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "alpha": ("ALPHA",), + } + } + + CATEGORY = "compositing" + RETURN_TYPES = ("IMAGE",) + FUNCTION = "join_image_with_alpha" + + def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor): + batch_size = min(len(image), len(alpha)) + out_images = [] + + for i in range(batch_size): + out_images.append(torch.cat((image[i], alpha[i].unsqueeze(2)), dim=2)) + + result = (torch.stack(out_images),) + return result + + +class ConvertAlphaToImage: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "alpha": ("ALPHA",), + } + } + + CATEGORY = "compositing" + RETURN_TYPES = ("IMAGE",) + FUNCTION = "alpha_to_image" + + def alpha_to_image(self, alpha): + result = alpha.reshape((-1, 1, alpha.shape[-2], alpha.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + return (result,) + + +class ConvertImageToAlpha: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "channel": (["red", "green", "blue", "alpha"],), + } + } + + CATEGORY = "compositing" + RETURN_TYPES = ("ALPHA",) + FUNCTION = "image_to_alpha" + + def image_to_alpha(self, image, channel): + channels = ["red", "green", "blue", "alpha"] + alpha = image[0, :, :, channels.index(channel)] + return (alpha,) + + +NODE_CLASS_MAPPINGS = { + "PorterDuffImageComposite": PorterDuffImageComposite, + "SplitImageWithAlpha": SplitImageWithAlpha, + "JoinImageWithAlpha": JoinImageWithAlpha, + "ConvertAlphaToImage": ConvertAlphaToImage, + "ConvertImageToAlpha": ConvertImageToAlpha, +} + + +NODE_DISPLAY_NAME_MAPPINGS = { + "PorterDuffImageComposite": "Porter-Duff Image Composite", + "SplitImageWithAlpha": "Split Image with Alpha", + "JoinImageWithAlpha": "Join Image with Alpha", + "ConvertAlphaToImage": "Convert Alpha to Image", + "ConvertImageToAlpha": "Convert Image to Alpha", +} diff --git a/nodes.py b/nodes.py index 919aac89e16..8be332f9188 100644 --- a/nodes.py +++ b/nodes.py @@ -1372,6 +1372,31 @@ def VALIDATE_INPUTS(s, image): return True +class LoadImageWithAlpha(LoadImage): + @classmethod + def INPUT_TYPES(s): + input_dir = 
folder_paths.get_input_directory() + files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] + return {"required": + {"image": (sorted(files), {"image_upload": True})}, + } + + CATEGORY = "compositing" + + RETURN_TYPES = ("IMAGE", "ALPHA") + + FUNCTION = "load_image" + def load_image(self, image): + image_path = folder_paths.get_annotated_filepath(image) + i = Image.open(image_path) + i = ImageOps.exif_transpose(i) + image = i.convert("RGBA") + alpha = np.array(image.getchannel("A")).astype(np.float32) / 255.0 + alpha = torch.from_numpy(alpha)[None,] + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + return (image, alpha) + class LoadImageMask: _color_channels = ["alpha", "red", "green", "blue"] @classmethod @@ -1606,6 +1631,7 @@ def expand_image(self, image, left, top, right, bottom, feathering): "SaveImage": SaveImage, "PreviewImage": PreviewImage, "LoadImage": LoadImage, + "LoadImageWithAlpha": LoadImageWithAlpha, "LoadImageMask": LoadImageMask, "ImageScale": ImageScale, "ImageScaleBy": ImageScaleBy, @@ -1702,6 +1728,7 @@ def expand_image(self, image, left, top, right, bottom, feathering): "SaveImage": "Save Image", "PreviewImage": "Preview Image", "LoadImage": "Load Image", + "LoadImageWithAlpha": "Load Image with Alpha", "LoadImageMask": "Load Image (as Mask)", "ImageScale": "Upscale Image", "ImageScaleBy": "Upscale Image By", @@ -1788,6 +1815,7 @@ def init_custom_nodes(): "nodes_upscale_model.py", "nodes_post_processing.py", "nodes_mask.py", + "nodes_compositing.py", "nodes_rebatch.py", "nodes_model_merging.py", "nodes_tomesd.py", From ece69bf28c0d5872bdec1cc9e66db50f09eaa74b Mon Sep 17 00:00:00 2001 From: MoonRide303 Date: Sat, 23 Sep 2023 08:34:54 +0200 Subject: [PATCH 044/420] Change channel type to MASK (reduced redundancy, increased usability) --- comfy_extras/nodes_compositing.py | 52 +++---------------------------- comfy_extras/nodes_mask.py | 4 +-- nodes.py | 2 +- 3 files changed, 8 insertions(+), 50 deletions(-) diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py index c4c58b64ed1..6899e4a86bc 100644 --- a/comfy_extras/nodes_compositing.py +++ b/comfy_extras/nodes_compositing.py @@ -93,14 +93,14 @@ def INPUT_TYPES(s): return { "required": { "source": ("IMAGE",), - "source_alpha": ("ALPHA",), + "source_alpha": ("MASK",), "destination": ("IMAGE",), - "destination_alpha": ("ALPHA",), + "destination_alpha": ("MASK",), "mode": ([mode.name for mode in PorterDuffMode], {"default": PorterDuffMode.DST.name}), }, } - RETURN_TYPES = ("IMAGE", "ALPHA") + RETURN_TYPES = ("IMAGE", "MASK") FUNCTION = "composite" CATEGORY = "compositing" @@ -148,7 +148,7 @@ def INPUT_TYPES(s): } CATEGORY = "compositing" - RETURN_TYPES = ("IMAGE", "ALPHA") + RETURN_TYPES = ("IMAGE", "MASK") FUNCTION = "split_image_with_alpha" def split_image_with_alpha(self, image: torch.Tensor): @@ -164,7 +164,7 @@ def INPUT_TYPES(s): return { "required": { "image": ("IMAGE",), - "alpha": ("ALPHA",), + "alpha": ("MASK",), } } @@ -183,50 +183,10 @@ def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor): return result -class ConvertAlphaToImage: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "alpha": ("ALPHA",), - } - } - - CATEGORY = "compositing" - RETURN_TYPES = ("IMAGE",) - FUNCTION = "alpha_to_image" - - def alpha_to_image(self, alpha): - result = alpha.reshape((-1, 1, alpha.shape[-2], alpha.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) - return (result,) - - -class 
ConvertImageToAlpha: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "channel": (["red", "green", "blue", "alpha"],), - } - } - - CATEGORY = "compositing" - RETURN_TYPES = ("ALPHA",) - FUNCTION = "image_to_alpha" - - def image_to_alpha(self, image, channel): - channels = ["red", "green", "blue", "alpha"] - alpha = image[0, :, :, channels.index(channel)] - return (alpha,) - - NODE_CLASS_MAPPINGS = { "PorterDuffImageComposite": PorterDuffImageComposite, "SplitImageWithAlpha": SplitImageWithAlpha, "JoinImageWithAlpha": JoinImageWithAlpha, - "ConvertAlphaToImage": ConvertAlphaToImage, - "ConvertImageToAlpha": ConvertImageToAlpha, } @@ -234,6 +194,4 @@ def image_to_alpha(self, image, channel): "PorterDuffImageComposite": "Porter-Duff Image Composite", "SplitImageWithAlpha": "Split Image with Alpha", "JoinImageWithAlpha": "Join Image with Alpha", - "ConvertAlphaToImage": "Convert Alpha to Image", - "ConvertImageToAlpha": "Convert Image to Alpha", } diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index cdf762ffd05..9b0b289c189 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -114,7 +114,7 @@ def INPUT_TYPES(s): return { "required": { "image": ("IMAGE",), - "channel": (["red", "green", "blue"],), + "channel": (["red", "green", "blue", "alpha"],), } } @@ -124,7 +124,7 @@ def INPUT_TYPES(s): FUNCTION = "image_to_mask" def image_to_mask(self, image, channel): - channels = ["red", "green", "blue"] + channels = ["red", "green", "blue", "alpha"] mask = image[:, :, :, channels.index(channel)] return (mask,) diff --git a/nodes.py b/nodes.py index 8be332f9188..9f8e58d0f21 100644 --- a/nodes.py +++ b/nodes.py @@ -1383,7 +1383,7 @@ def INPUT_TYPES(s): CATEGORY = "compositing" - RETURN_TYPES = ("IMAGE", "ALPHA") + RETURN_TYPES = ("IMAGE", "MASK") FUNCTION = "load_image" def load_image(self, image): From 585fb0475bbaf919bd340c72d339752bbb93ef55 Mon Sep 17 00:00:00 2001 From: MoonRide303 Date: Sat, 23 Sep 2023 13:19:42 +0200 Subject: [PATCH 045/420] Adding default alpha when splitting RGB images --- comfy_extras/nodes_compositing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py index 6899e4a86bc..b0ae2dfa029 100644 --- a/comfy_extras/nodes_compositing.py +++ b/comfy_extras/nodes_compositing.py @@ -153,7 +153,7 @@ def INPUT_TYPES(s): def split_image_with_alpha(self, image: torch.Tensor): out_images = [i[:,:,:3] for i in image] - out_alphas = [i[:,:,3] for i in image] + out_alphas = [i[:,:,3] if i.shape[2] > 3 else torch.ones_like(i[:,:,0]) for i in image] result = (torch.stack(out_images), torch.stack(out_alphas)) return result From 214ca7197ef753bce3b40f642c6775d919568c2f Mon Sep 17 00:00:00 2001 From: MoonRide303 Date: Sun, 24 Sep 2023 00:12:55 +0200 Subject: [PATCH 046/420] Corrected joining images with alpha (for RGBA input), and checking scaling conditions --- comfy_extras/nodes_compositing.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py index b0ae2dfa029..f39daa00916 100644 --- a/comfy_extras/nodes_compositing.py +++ b/comfy_extras/nodes_compositing.py @@ -113,19 +113,21 @@ def composite(self, source: torch.Tensor, source_alpha: torch.Tensor, destinatio src_image = source[i] dst_image = destination[i] + assert src_image.shape[2] == dst_image.shape[2] # inputs need to have same number of channels + src_alpha = 
source_alpha[i].unsqueeze(2) dst_alpha = destination_alpha[i].unsqueeze(2) - if dst_alpha.shape != dst_image.shape: - upscale_input = dst_alpha[None,:,:,:].permute(0, 3, 1, 2) + if dst_alpha.shape[:2] != dst_image.shape[:2]: + upscale_input = dst_alpha.unsqueeze(0).permute(0, 3, 1, 2) upscale_output = comfy.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center') dst_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0) if src_image.shape != dst_image.shape: - upscale_input = src_image[None,:,:,:].permute(0, 3, 1, 2) + upscale_input = src_image.unsqueeze(0).permute(0, 3, 1, 2) upscale_output = comfy.utils.common_upscale(upscale_input, dst_image.shape[1], dst_image.shape[0], upscale_method='bicubic', crop='center') src_image = upscale_output.permute(0, 2, 3, 1).squeeze(0) if src_alpha.shape != dst_alpha.shape: - upscale_input = src_alpha[None,:,:,:].permute(0, 3, 1, 2) + upscale_input = src_alpha.unsqueeze(0).permute(0, 3, 1, 2) upscale_output = comfy.utils.common_upscale(upscale_input, dst_alpha.shape[1], dst_alpha.shape[0], upscale_method='bicubic', crop='center') src_alpha = upscale_output.permute(0, 2, 3, 1).squeeze(0) @@ -177,7 +179,7 @@ def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor): out_images = [] for i in range(batch_size): - out_images.append(torch.cat((image[i], alpha[i].unsqueeze(2)), dim=2)) + out_images.append(torch.cat((image[i][:,:,:3], alpha[i].unsqueeze(2)), dim=2)) result = (torch.stack(out_images),) return result From 9212bea87c47af5a1d9b51d59a2cf17e9a00e73f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 4 Oct 2023 14:40:17 -0400 Subject: [PATCH 047/420] Change a few things in #1578. --- comfy_extras/nodes_compositing.py | 6 +++--- nodes.py | 27 --------------------------- 2 files changed, 3 insertions(+), 30 deletions(-) diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py index f39daa00916..f8901eca172 100644 --- a/comfy_extras/nodes_compositing.py +++ b/comfy_extras/nodes_compositing.py @@ -102,7 +102,7 @@ def INPUT_TYPES(s): RETURN_TYPES = ("IMAGE", "MASK") FUNCTION = "composite" - CATEGORY = "compositing" + CATEGORY = "mask/compositing" def composite(self, source: torch.Tensor, source_alpha: torch.Tensor, destination: torch.Tensor, destination_alpha: torch.Tensor, mode): batch_size = min(len(source), len(source_alpha), len(destination), len(destination_alpha)) @@ -149,7 +149,7 @@ def INPUT_TYPES(s): } } - CATEGORY = "compositing" + CATEGORY = "mask/compositing" RETURN_TYPES = ("IMAGE", "MASK") FUNCTION = "split_image_with_alpha" @@ -170,7 +170,7 @@ def INPUT_TYPES(s): } } - CATEGORY = "compositing" + CATEGORY = "mask/compositing" RETURN_TYPES = ("IMAGE",) FUNCTION = "join_image_with_alpha" diff --git a/nodes.py b/nodes.py index 9f8e58d0f21..16bf07ccaa2 100644 --- a/nodes.py +++ b/nodes.py @@ -1372,31 +1372,6 @@ def VALIDATE_INPUTS(s, image): return True -class LoadImageWithAlpha(LoadImage): - @classmethod - def INPUT_TYPES(s): - input_dir = folder_paths.get_input_directory() - files = [f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir, f))] - return {"required": - {"image": (sorted(files), {"image_upload": True})}, - } - - CATEGORY = "compositing" - - RETURN_TYPES = ("IMAGE", "MASK") - - FUNCTION = "load_image" - def load_image(self, image): - image_path = folder_paths.get_annotated_filepath(image) - i = Image.open(image_path) - i = ImageOps.exif_transpose(i) - image = i.convert("RGBA") - alpha = 
np.array(image.getchannel("A")).astype(np.float32) / 255.0 - alpha = torch.from_numpy(alpha)[None,] - image = np.array(image).astype(np.float32) / 255.0 - image = torch.from_numpy(image)[None,] - return (image, alpha) - class LoadImageMask: _color_channels = ["alpha", "red", "green", "blue"] @classmethod @@ -1631,7 +1606,6 @@ def expand_image(self, image, left, top, right, bottom, feathering): "SaveImage": SaveImage, "PreviewImage": PreviewImage, "LoadImage": LoadImage, - "LoadImageWithAlpha": LoadImageWithAlpha, "LoadImageMask": LoadImageMask, "ImageScale": ImageScale, "ImageScaleBy": ImageScaleBy, @@ -1728,7 +1702,6 @@ def expand_image(self, image, left, top, right, bottom, feathering): "SaveImage": "Save Image", "PreviewImage": "Preview Image", "LoadImage": "Load Image", - "LoadImageWithAlpha": "Load Image with Alpha", "LoadImageMask": "Load Image (as Mask)", "ImageScale": "Upscale Image", "ImageScaleBy": "Upscale Image By", From 0b9246d9fad06834e8418904eb189d57f65c8eb7 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 4 Oct 2023 20:48:55 +0100 Subject: [PATCH 048/420] allow connecting numbers merging config --- web/extensions/core/widgetInputs.js | 226 +++++++++++++++++++++++----- 1 file changed, 188 insertions(+), 38 deletions(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 98d52b02c97..ccf437ed472 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -3,6 +3,7 @@ import { app } from "../../scripts/app.js"; const CONVERTED_TYPE = "converted-widget"; const VALID_TYPES = ["STRING", "combo", "number", "BOOLEAN"]; +const CONFIG = Symbol(); function getConfig(widgetName) { const { nodeData } = this.constructor; @@ -154,9 +155,6 @@ app.registerExtension({ input.widget.getConfig = getConfig.bind(this, input.widget.name); } - const config = input.widget.getConfig(); - if (config[1]?.forceInput) continue; - const w = this.widgets.find((w) => w.name === input.widget.name); if (w) { hideWidget(this, w); @@ -306,9 +304,17 @@ app.registerExtension({ this.#onFirstConnection(); // Populate widget values from config data - for (let i = 0; i < this.widgets_values.length; i++) { - this.widgets[i].value = this.widgets_values[i]; + if (this.widgets) { + for (let i = 0; i < this.widgets_values.length; i++) { + const w = this.widgets[i]; + if (w) { + w.value = this.widgets_values[i]; + } + } } + + // Merge values if required + this.#mergeWidgetConfig(); } } @@ -318,12 +324,18 @@ app.registerExtension({ return; } + const links = this.outputs[0].links; if (connected) { - if (this.outputs[0].links?.length && !this.widgets?.length) { + if (links?.length && !this.widgets?.length) { this.#onFirstConnection(); } - } else if (!this.outputs[0].links?.length) { - this.#onLastDisconnect(); + } else { + // We may have removed a link that caused the constraints to change + this.#mergeWidgetConfig(); + + if (!links?.length) { + this.#onLastDisconnect(); + } } } @@ -340,7 +352,7 @@ app.registerExtension({ } } - #onFirstConnection() { + #onFirstConnection(recreating) { // First connection can fire before the graph is ready on initial load so random things can be missing const linkId = this.outputs[0].links[0]; const link = this.graph.links[linkId]; @@ -366,10 +378,10 @@ app.registerExtension({ this.outputs[0].name = type; this.outputs[0].widget = widget; - this.#createWidget(widget.getConfig(), theirNode, widget.name); + this.#createWidget(widget[CONFIG] ?? 
widget.getConfig(), theirNode, widget.name, recreating); } - #createWidget(inputData, node, widgetName) { + #createWidget(inputData, node, widgetName, recreating) { let type = inputData[0]; if (type instanceof Array) { @@ -404,25 +416,70 @@ app.registerExtension({ return r; }; - // Grow our node if required - const sz = this.computeSize(); - if (this.size[0] < sz[0]) { - this.size[0] = sz[0]; + if (!recreating) { + // Grow our node if required + const sz = this.computeSize(); + if (this.size[0] < sz[0]) { + this.size[0] = sz[0]; + } + if (this.size[1] < sz[1]) { + this.size[1] = sz[1]; + } + + requestAnimationFrame(() => { + if (this.onResize) { + this.onResize(this.size); + } + }); } - if (this.size[1] < sz[1]) { - this.size[1] = sz[1]; + } + + #recreateWidget() { + const values = this.widgets.map((w) => w.value); + this.#removeWidgets(); + this.#onFirstConnection(true); + for (let i = 0; i < this.widgets?.length; i++) this.widgets[i].value = values[i]; + } + + #mergeWidgetConfig() { + // Merge widget configs if the node has multiple outputs + const output = this.outputs[0]; + const links = output.links; + + const hasConfig = !!output.widget[CONFIG]; + if (hasConfig) { + delete output.widget[CONFIG]; } - requestAnimationFrame(() => { - if (this.onResize) { - this.onResize(this.size); + if (links?.length < 2 && hasConfig) { + // Copy the widget options from the source + if (links.length) { + this.#recreateWidget(); } - }); + + return; + } + + const config1 = output.widget.getConfig(); + const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; + if (!isNumber) return; + + for (const linkId of links) { + const link = app.graph.links[linkId]; + if (!link) continue; // Can be null when removing a node + + const theirNode = app.graph.getNodeById(link.target_id); + const theirInput = theirNode.inputs[link.target_slot]; + + // Call is valid connection so it can merge the configs when validating + this.#isValidConnection(theirInput, hasConfig); + } } - #isValidConnection(input) { + #isValidConnection(input, forceUpdate) { // Only allow connections where the configs match - const config1 = this.outputs[0].widget.getConfig(); + const output = this.outputs[0]; + const config1 = output.widget[CONFIG] ?? output.widget.getConfig(); const config2 = input.widget.getConfig(); if (config1[0] instanceof Array) { @@ -430,34 +487,117 @@ app.registerExtension({ // but double checking doesn't hurt // New input isnt a combo - if (!(config2[0] instanceof Array)) return false; + if (!(config2[0] instanceof Array)) { + console.log(`connection rejected: tried to connect combo to ${config2[0]}`); + return false; + } // New imput combo has a different size - if (config1[0].length !== config2[0].length) return false; + if (config1[0].length !== config2[0].length) { + console.log(`connection rejected: combo lists dont match`); + return false; + } // New input combo has different elements - if (config1[0].find((v, i) => config2[0][i] !== v)) return false; + if (config1[0].find((v, i) => config2[0][i] !== v)) { + console.log(`connection rejected: combo lists dont match`); + return false; + } } else if (config1[0] !== config2[0]) { - // Configs dont match + // Types dont match + console.log(`connection rejected: types dont match`, config1[0], config2[0]); return false; } - for (const k in config1[1]) { - if (k !== "default" && k !== "forceInput") { - if (config1[1][k] !== config2[1][k]) { - return false; + const keys = new Set([...Object.keys(config1[1] ?? {}), ...Object.keys(config2[1] ?? 
{})]); + + let customConfig; + const getCustomConfig = () => { + if (!customConfig) { + if (typeof structuredClone === "undefined") { + customConfig = JSON.parse(JSON.stringify(config1[1] ?? {})); + } else { + customConfig = structuredClone(config1[1] ?? {}); + } + } + return customConfig; + }; + + const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; + for (const k of keys.values()) { + if (k !== "default" && k !== "forceInput" && k !== "defaultInput") { + let v1 = config1[1][k]; + let v2 = config2[1][k]; + + if (v1 === v2 || (!v1 && !v2)) continue; + + if (isNumber) { + if (k === "min") { + const theirMax = config2[1]["max"]; + if (theirMax != null && v1 > theirMax) { + console.log("Invalid connection, min > max"); + return false; + } + getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.max(v1, v2); + continue; + } else if (k === "max") { + const theirMin = config2[1]["min"]; + if (theirMin != null && v1 < theirMin) { + console.log("Invalid connection, max < min"); + return false; + } + getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.min(v1, v2); + continue; + } else if (k === "step") { + let step; + if (v1 == null) { + step = v2; + } else if (v2 == null) { + step = v1; + } else { + if (v1 < v2) { + const a = v2; + v2 = v1; + v1 = a; + } + if (v1 % v2) { + console.log("Steps not divisible", "current:", v1, "new:", v2); + return false; + } + + step = v1; + } + + getCustomConfig()[k] = step; + continue; + } } + + console.log(`connection rejected: config ${k} values dont match`, v1, v2); + return false; + } + } + + if (customConfig || forceUpdate) { + if (customConfig) { + output.widget[CONFIG] = [config1[0], customConfig]; + } + + this.#recreateWidget(); + + const widget = this.widgets[0]; + // When deleting a node this can be null + if (widget) { + const min = widget.options.min; + const max = widget.options.max; + if (min != null && widget.value < min) widget.value = min; + if (max != null && widget.value > max) widget.value = max; + widget.callback(widget.value); } } return true; } - #onLastDisconnect() { - // We cant remove + re-add the output here as if you drag a link over the same link - // it removes, then re-adds, causing it to break - this.outputs[0].type = "*"; - this.outputs[0].name = "connect to widget input"; - delete this.outputs[0].widget; - + #removeWidgets() { if (this.widgets) { // Allow widgets to cleanup for (const w of this.widgets) { @@ -468,6 +608,16 @@ app.registerExtension({ this.widgets.length = 0; } } + + #onLastDisconnect() { + // We cant remove + re-add the output here as if you drag a link over the same link + // it removes, then re-adds, causing it to break + this.outputs[0].type = "*"; + this.outputs[0].name = "connect to widget input"; + delete this.outputs[0].widget; + + this.#removeWidgets(); + } } LiteGraph.registerNodeType( From 0e763e880f5e838e7a1e3914444cae6790c48627 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 4 Oct 2023 15:54:34 -0400 Subject: [PATCH 049/420] JoinImageWithAlpha now works with any mask shape. 
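The previous code assumed the mask already matched the image batch layout. The
new resize_mask helper below normalizes whatever shape comes in to [B, 1, H, W]
before bilinearly resizing it, so [H, W] and [B, H, W] masks both work. A
minimal sketch of the reshape semantics (tensor sizes invented for
illustration):

    import torch
    mask = torch.rand(512, 640)  # [H, W]; a [B, H, W] batch goes through the same path
    m = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1]))  # -> [1, 1, 512, 640]
    m = torch.nn.functional.interpolate(m, size=(256, 320), mode="bilinear")
    print(m.squeeze(1).shape)  # torch.Size([1, 256, 320])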
--- comfy_extras/nodes_compositing.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py index f8901eca172..68bfce11110 100644 --- a/comfy_extras/nodes_compositing.py +++ b/comfy_extras/nodes_compositing.py @@ -3,6 +3,8 @@ import comfy.utils from enum import Enum +def resize_mask(mask, shape): + return torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[0], shape[1]), mode="bilinear").squeeze(1) class PorterDuffMode(Enum): ADD = 0 @@ -178,6 +180,7 @@ def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor): batch_size = min(len(image), len(alpha)) out_images = [] + alpha = resize_mask(alpha, image.shape[1:]) for i in range(batch_size): out_images.append(torch.cat((image[i][:,:,:3], alpha[i].unsqueeze(2)), dim=2)) From 63e5fd17907fa2100725d6d46053084c14065232 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Wed, 4 Oct 2023 19:45:15 -0300 Subject: [PATCH 050/420] Option to input directory --- comfy/cli_args.py | 1 + folder_paths.py | 4 ++++ main.py | 5 +++++ 3 files changed, 10 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index ffae81c49d1..35d44164f1c 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -39,6 +39,7 @@ def __call__(self, parser, namespace, values, option_string=None): parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).") +parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.") parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.") parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.") parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.") diff --git a/folder_paths.py b/folder_paths.py index 4a10c68e7e7..898513b0e1f 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -46,6 +46,10 @@ def set_temp_directory(temp_dir): global temp_directory temp_directory = temp_dir +def set_input_directory(input_dir): + global input_directory + input_directory = input_dir + def get_output_directory(): global output_directory return output_directory diff --git a/main.py b/main.py index 7c5eaee0a83..875ea1aa908 100644 --- a/main.py +++ b/main.py @@ -175,6 +175,11 @@ def load_extra_path_config(yaml_path): print(f"Setting output directory to: {output_dir}") folder_paths.set_output_directory(output_dir) + if args.input_directory: + input_dir = os.path.abspath(args.input_directory) + print(f"Setting input directory to: {input_dir}") + folder_paths.set_input_directory(input_dir) + if args.quick_test_for_ci: exit(0) From 48242be50866f5d6d22d120743d5d39cd6a0c178 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 5 Oct 2023 08:25:15 -0400 Subject: [PATCH 051/420] Update readme for pytorch 2.1 --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 97677921add..559e99ffa4d 100644 --- a/README.md +++ b/README.md @@ -92,16 +92,16 @@ Put your VAE in: models/vae ### 
AMD GPUs (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2``` +```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6``` -This is the command to install the nightly with ROCm 5.7 that supports the 7000 series and might have some performance improvements: +This is the command to install the nightly with ROCm 5.7 that might have some performance improvements: ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.7``` ### NVIDIA -Nvidia users should install torch and xformers using this command: +Nvidia users should install pytorch using this command: -```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers``` +```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu121``` #### Troubleshooting From 80932ddf406c7da0ab97855801c468cfafa50386 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 5 Oct 2023 17:13:13 +0100 Subject: [PATCH 052/420] updated messages --- web/extensions/core/widgetInputs.js | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index ccf437ed472..271b02db38b 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -533,7 +533,7 @@ app.registerExtension({ if (k === "min") { const theirMax = config2[1]["max"]; if (theirMax != null && v1 > theirMax) { - console.log("Invalid connection, min > max"); + console.log("connection rejected: min > max", v1, theirMax); return false; } getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.max(v1, v2); @@ -541,7 +541,7 @@ app.registerExtension({ } else if (k === "max") { const theirMin = config2[1]["min"]; if (theirMin != null && v1 < theirMin) { - console.log("Invalid connection, max < min"); + console.log("connection rejected: max < min", v1, theirMin); return false; } getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? 
v1 : Math.min(v1, v2); @@ -549,17 +549,20 @@ app.registerExtension({ } else if (k === "step") { let step; if (v1 == null) { + // No current step step = v2; } else if (v2 == null) { + // No new step step = v1; } else { if (v1 < v2) { + // Ensure v1 is larger for the mod const a = v2; v2 = v1; v1 = a; } if (v1 % v2) { - console.log("Steps not divisible", "current:", v1, "new:", v2); + console.log("connection rejected: steps not divisible", "current:", v1, "new:", v2); return false; } From b9b178b8394122651118c7453518320604a3f1f1 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 5 Oct 2023 19:16:39 +0100 Subject: [PATCH 053/420] More cleanup of old type data Fix connecting combos of same type from different types of node --- web/extensions/core/widgetInputs.js | 34 +++++++++++++++++------------ 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 271b02db38b..c734ffe273a 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -61,11 +61,11 @@ function showWidget(widget) { function convertToInput(node, widget, config) { hideWidget(node, widget); - const { linkType } = getWidgetType(config, `${node.comfyClass}|${widget.name}`); + const { type } = getWidgetType(config); // Add input and store widget config for creating on primitive node const sz = node.size; - node.addInput(widget.name, linkType, { + node.addInput(widget.name, type, { widget: { name: widget.name, getConfig: () => config }, }); @@ -90,15 +90,13 @@ function convertToWidget(node, widget) { node.setSize([Math.max(sz[0], node.size[0]), Math.max(sz[1], node.size[1])]); } -function getWidgetType(config, comboType) { +function getWidgetType(config) { // Special handling for COMBO so we restrict links based on the entries let type = config[0]; - let linkType = type; if (type instanceof Array) { type = "COMBO"; - linkType = comboType; } - return { type, linkType }; + return { type }; } app.registerExtension({ @@ -148,13 +146,24 @@ app.registerExtension({ for (const input of this.inputs) { if (input.widget) { - // Cleanup old widget config - delete input.widget.config; - if (!input.widget.getConfig) { input.widget.getConfig = getConfig.bind(this, input.widget.name); } + // Cleanup old widget config + if (input.widget.config) { + if (input.widget.config[0] instanceof Array) { + // If we are an old converted combo then replace the input type and the stored link data + input.type = "COMBO"; + + const link = app.graph.links[input.link]; + if (link) { + link.type = input.type; + } + } + delete input.widget.config; + } + const w = this.widgets.find((w) => w.name === input.widget.name); if (w) { hideWidget(this, w); @@ -372,9 +381,9 @@ app.registerExtension({ widget = input.widget; } - const { type, linkType } = getWidgetType(widget.getConfig(), `${theirNode.comfyClass}|${widget.name}`); + const { type } = getWidgetType(widget.getConfig()); // Update our output to restrict to the widget type - this.outputs[0].type = linkType; + this.outputs[0].type = type; this.outputs[0].name = type; this.outputs[0].widget = widget; @@ -483,9 +492,6 @@ app.registerExtension({ const config2 = input.widget.getConfig(); if (config1[0] instanceof Array) { - // These checks shouldnt actually be necessary as the types should match - // but double checking doesn't hurt - // New input isnt a combo if (!(config2[0] instanceof Array)) { console.log(`connection rejected: tried to connect 
combo to ${config2[0]}`); From 6f464f801f718bf9c1274aa49252deb0b52fbd51 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 6 Oct 2023 03:32:00 -0400 Subject: [PATCH 054/420] Update nightly workflow to python 3.11.6 --- .github/workflows/windows_release_nightly_pytorch.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index 319942e7c58..b793f7fe2b2 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -20,12 +20,12 @@ jobs: persist-credentials: false - uses: actions/setup-python@v4 with: - python-version: '3.11.3' + python-version: '3.11.6' - shell: bash run: | cd .. cp -r ComfyUI ComfyUI_copy - curl https://www.python.org/ftp/python/3.11.3/python-3.11.3-embed-amd64.zip -o python_embeded.zip + curl https://www.python.org/ftp/python/3.11.6/python-3.11.6-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded echo 'import site' >> ./python311._pth From 34b36e3207522aa1a3e48a17e628c0aae3c4c5c9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 6 Oct 2023 10:26:51 -0400 Subject: [PATCH 055/420] More configurable workflows to package windows release. --- .../windows_release_dependencies.yml | 53 ++++++++++ .github/workflows/windows_release_package.yml | 96 +++++++++++++++++++ 2 files changed, 149 insertions(+) create mode 100644 .github/workflows/windows_release_dependencies.yml create mode 100644 .github/workflows/windows_release_package.yml diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml new file mode 100644 index 00000000000..590495c6523 --- /dev/null +++ b/.github/workflows/windows_release_dependencies.yml @@ -0,0 +1,53 @@ +name: "Windows Release dependencies" + +on: + workflow_dispatch: + inputs: + xformers: + description: 'xformers version' + required: true + type: string + default: "" + cu: + description: 'cuda version' + required: true + type: string + default: "121" + + python_minor: + description: 'python minor version' + required: true + type: string + default: "11" + + python_patch: + description: 'python patch version' + required: true + type: string + default: "6" +# push: +# branches: +# - master + +jobs: + build_dependencies: + runs-on: windows-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }} + + - shell: bash + run: | + python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r requirements.txt pygit2 -w ./temp_wheel_dir + python -m pip install --no-cache-dir ./temp_wheel_dir/* + echo installed basic + ls -lah temp_wheel_dir + mv temp_wheel_dir cu${{ inputs.cu }}_python_deps + tar cf cu${{ inputs.cu }}_python_deps.tar cu${{ inputs.cu }}_python_deps + + - uses: actions/cache/save@v3 + with: + path: cu${{ inputs.cu }}_python_deps.tar + key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }} diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml new file mode 100644 index 00000000000..bc26db282a3 --- /dev/null +++ b/.github/workflows/windows_release_package.yml @@ -0,0 +1,96 @@ +name: "Windows Release packaging" + +on: + workflow_dispatch: + cu: + description: 'cuda version' 
+ required: true + type: string + default: "121" + + python_minor: + description: 'python minor version' + required: true + type: string + default: "11" + + python_patch: + description: 'python patch version' + required: true + type: string + default: "6" +# push: +# branches: +# - master + +jobs: + package_comfyui: + permissions: + contents: "write" + packages: "write" + pull-requests: "read" + runs-on: windows-latest + steps: + - uses: actions/cache/restore@v3 + id: cache + with: + path: cu${{ inputs.cu }}_python_deps.tar + key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }} + - shell: bash + run: | + mv cu${{ inputs.cu }}_python_deps.tar ../ + cd .. + tar xf cu${{ inputs.cu }}_python_deps.tar + pwd + ls + + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + persist-credentials: false + - shell: bash + run: | + cd .. + cp -r ComfyUI ComfyUI_copy + curl https://www.python.org/ftp/python/3.${{ inputs.python_minor }}.${{ inputs.python_patch }}/python-3.${{ inputs.python_minor }}.${{ inputs.python_patch }}-embed-amd64.zip -o python_embeded.zip + unzip python_embeded.zip -d python_embeded + cd python_embeded + echo 'import site' >> ./python3${{ inputs.python_minor }}._pth + curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py + ./python.exe get-pip.py + ./python.exe -s -m pip install ../cu${{ inputs.cu }}_python_deps/* + sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth + cd .. + + git clone https://github.com/comfyanonymous/taesd + cp taesd/*.pth ./ComfyUI_copy/models/vae_approx/ + + mkdir ComfyUI_windows_portable + mv python_embeded ComfyUI_windows_portable + mv ComfyUI_copy ComfyUI_windows_portable/ComfyUI + + cd ComfyUI_windows_portable + + mkdir update + cp -r ComfyUI/.ci/update_windows/* ./update/ + cp -r ComfyUI/.ci/update_windows_cu${{ inputs.cu }}/* ./update/ + cp -r ComfyUI/.ci/windows_base_files/* ./ + + cd .. + + "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable + mv ComfyUI_windows_portable.7z ComfyUI/new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z + + cd ComfyUI_windows_portable + python_embeded/python.exe -s ComfyUI/main.py --quick-test-for-ci --cpu + + ls + + - name: Upload binaries to release + uses: svenstaro/upload-release-action@v2 + with: + repo_token: ${{ secrets.GITHUB_TOKEN }} + file: new_ComfyUI_windows_portable_nvidia_cu${{ inputs.cu }}_or_cpu.7z + tag: "latest" + overwrite: true + From 640d5080e53cc687384fdfa807ca0c29a16e6687 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 6 Oct 2023 10:29:52 -0400 Subject: [PATCH 056/420] Make xformers optional in packaging. --- .github/workflows/windows_release_dependencies.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml index 590495c6523..104639a0525 100644 --- a/.github/workflows/windows_release_dependencies.yml +++ b/.github/workflows/windows_release_dependencies.yml @@ -5,7 +5,7 @@ on: inputs: xformers: description: 'xformers version' - required: true + required: false type: string default: "" cu: From 1497528de8fbacd400921a1c0a307356aea94abf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 6 Oct 2023 10:43:12 -0400 Subject: [PATCH 057/420] Fix workflow. 
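With workflow_dispatch, GitHub Actions only accepts parameters that are nested
under an inputs: key; the previous commit declared cu/python_minor/python_patch
directly on the trigger, which is rejected as an invalid workflow file.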
--- .github/workflows/windows_release_package.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index bc26db282a3..a4f36a7067d 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -2,6 +2,7 @@ name: "Windows Release packaging" on: workflow_dispatch: + inputs: cu: description: 'cuda version' required: true From d761eaa4864e21d9302c6e58eb36daa20cecee6a Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 6 Oct 2023 17:47:46 +0100 Subject: [PATCH 058/420] if the output type is an array, use combo --- web/scripts/app.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 3c29a684ad4..5b9e7658069 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1306,7 +1306,8 @@ export class ComfyApp { } for (const o in nodeData["output"]) { - const output = nodeData["output"][o]; + let output = nodeData["output"][o]; + if(output instanceof Array) output = "COMBO"; const outputName = nodeData["output_name"][o] || output; const outputShape = nodeData["output_is_list"][o] ? LiteGraph.GRID_SHAPE : LiteGraph.CIRCLE_SHAPE ; this.addOutput(outputName, output, { shape: outputShape }); From 0134d7ab49702b71af37451c647fedb8814704ac Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 6 Oct 2023 12:49:40 -0400 Subject: [PATCH 059/420] Generate update script with right settings. --- .../workflows/windows_release_dependencies.yml | 16 +++++++++++++++- .github/workflows/windows_release_package.yml | 7 +++++-- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml index 104639a0525..f2ac940746c 100644 --- a/.github/workflows/windows_release_dependencies.yml +++ b/.github/workflows/windows_release_dependencies.yml @@ -40,6 +40,18 @@ jobs: - shell: bash run: | + echo "@echo off + ..\python_embeded\python.exe .\update.py ..\ComfyUI\ + echo + echo This will try to update pytorch and all python dependencies, if you get an error wait for pytorch/xformers to fix their stuff + echo You should not be running this anyways unless you really have to + echo + echo If you just want to update normally, close this and run update_comfyui.bat instead. 
+ echo + pause + ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2 + pause" > update_comfyui_and_python_dependencies.bat + python -m pip wheel --no-cache-dir torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r requirements.txt pygit2 -w ./temp_wheel_dir python -m pip install --no-cache-dir ./temp_wheel_dir/* echo installed basic @@ -49,5 +61,7 @@ jobs: - uses: actions/cache/save@v3 with: - path: cu${{ inputs.cu }}_python_deps.tar + path: | + cu${{ inputs.cu }}_python_deps.tar + update_comfyui_and_python_dependencies.bat key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }} diff --git a/.github/workflows/windows_release_package.yml b/.github/workflows/windows_release_package.yml index a4f36a7067d..87d37c24d89 100644 --- a/.github/workflows/windows_release_package.yml +++ b/.github/workflows/windows_release_package.yml @@ -35,11 +35,14 @@ jobs: - uses: actions/cache/restore@v3 id: cache with: - path: cu${{ inputs.cu }}_python_deps.tar + path: | + cu${{ inputs.cu }}_python_deps.tar + update_comfyui_and_python_dependencies.bat key: ${{ runner.os }}-build-cu${{ inputs.cu }}-${{ inputs.python_minor }} - shell: bash run: | mv cu${{ inputs.cu }}_python_deps.tar ../ + mv update_comfyui_and_python_dependencies.bat ../ cd .. tar xf cu${{ inputs.cu }}_python_deps.tar pwd @@ -74,8 +77,8 @@ jobs: mkdir update cp -r ComfyUI/.ci/update_windows/* ./update/ - cp -r ComfyUI/.ci/update_windows_cu${{ inputs.cu }}/* ./update/ cp -r ComfyUI/.ci/windows_base_files/* ./ + cp ../update_comfyui_and_python_dependencies.bat ./update/ cd .. From 72188dffc3d331be41e366c4f0fa6883645f669a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 6 Oct 2023 13:48:18 -0400 Subject: [PATCH 060/420] load_checkpoint_guess_config can now optionally output the model. 
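This lets callers that only need the text encoder or VAE skip building and
loading the UNet entirely. A minimal usage sketch (the checkpoint path here is
hypothetical):

    import comfy.sd
    # model_patcher comes back as None, so the UNet is never instantiated
    # or moved to the GPU
    model_patcher, clip, vae, clipvision = comfy.sd.load_checkpoint_guess_config(
        "models/checkpoints/example.safetensors",  # hypothetical path
        output_model=False)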
--- comfy/sd.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index f186273eaa6..cfd6fb3cb56 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -394,13 +394,14 @@ class EmptyClass: return (comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae) -def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None): +def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, output_clipvision=False, embedding_directory=None, output_model=True): sd = comfy.utils.load_torch_file(ckpt_path) sd_keys = sd.keys() clip = None clipvision = None vae = None model = None + model_patcher = None clip_target = None parameters = comfy.utils.calculate_parameters(sd, "model.diffusion_model.") @@ -421,10 +422,11 @@ class WeightsLoader(torch.nn.Module): if fp16: dtype = torch.float16 - inital_load_device = model_management.unet_inital_load_device(parameters, dtype) - offload_device = model_management.unet_offload_device() - model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device) - model.load_model_weights(sd, "model.diffusion_model.") + if output_model: + inital_load_device = model_management.unet_inital_load_device(parameters, dtype) + offload_device = model_management.unet_offload_device() + model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device) + model.load_model_weights(sd, "model.diffusion_model.") if output_vae: vae = VAE() @@ -444,10 +446,11 @@ class WeightsLoader(torch.nn.Module): if len(left_over) > 0: print("left over keys:", left_over) - model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device(), current_device=inital_load_device) - if inital_load_device != torch.device("cpu"): - print("loaded straight to GPU") - model_management.load_model_gpu(model_patcher) + if output_model: + model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device(), current_device=inital_load_device) + if inital_load_device != torch.device("cpu"): + print("loaded straight to GPU") + model_management.load_model_gpu(model_patcher) return (model_patcher, clip, vae, clipvision) From ae3e4e9ad821c12b955f6b2343e6255e6d71eaf7 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 6 Oct 2023 21:48:30 +0100 Subject: [PATCH 061/420] access getConfig via a symbol so structuredClone works (#1677) --- web/extensions/core/widgetInputs.js | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index c734ffe273a..3c9da458d6a 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -4,6 +4,7 @@ import { app } from "../../scripts/app.js"; const CONVERTED_TYPE = "converted-widget"; const VALID_TYPES = ["STRING", "combo", "number", "BOOLEAN"]; const CONFIG = Symbol(); +const GET_CONFIG = Symbol(); function getConfig(widgetName) { const { nodeData } = this.constructor; @@ -66,7 +67,7 @@ function convertToInput(node, widget, config) { // Add input and store widget config for creating on primitive node const sz = node.size; node.addInput(widget.name, type, { - widget: { 
name: widget.name, getConfig: () => config }, + widget: { name: widget.name, [GET_CONFIG]: () => config }, }); for (const widget of node.widgets) { @@ -146,8 +147,8 @@ app.registerExtension({ for (const input of this.inputs) { if (input.widget) { - if (!input.widget.getConfig) { - input.widget.getConfig = getConfig.bind(this, input.widget.name); + if (!input.widget[GET_CONFIG]) { + input.widget[GET_CONFIG] = () => getConfig.call(this, input.widget.name); } // Cleanup old widget config @@ -197,8 +198,8 @@ app.registerExtension({ if (!app.configuringGraph && this.inputs) { // On copy + paste of nodes, ensure that widget configs are set up for (const input of this.inputs) { - if (input.widget && !input.widget.getConfig) { - input.widget.getConfig = getConfig.bind(this, input.widget.name); + if (input.widget && !input.widget[GET_CONFIG]) { + input.widget[GET_CONFIG] = () => getConfig.call(this, input.widget.name); } } } @@ -224,7 +225,7 @@ app.registerExtension({ const input = this.inputs[slot]; if (!input.widget || !input[ignoreDblClick]) { // Not a widget input or already handled input - if (!(input.type in ComfyWidgets) && !(input.widget.getConfig?.()?.[0] instanceof Array)) { + if (!(input.type in ComfyWidgets) && !(input.widget[GET_CONFIG]?.()?.[0] instanceof Array)) { return r; //also Not a ComfyWidgets input or combo (do nothing) } } @@ -299,7 +300,7 @@ app.registerExtension({ refreshComboInNode() { const widget = this.widgets?.[0]; if (widget?.type === "combo") { - widget.options.values = this.outputs[0].widget.getConfig()[0]; + widget.options.values = this.outputs[0].widget[GET_CONFIG]()[0]; if (!widget.options.values.includes(widget.value)) { widget.value = widget.options.values[0]; @@ -376,18 +377,18 @@ app.registerExtension({ let widget; if (!input.widget) { if (!(input.type in ComfyWidgets)) return; - widget = { name: input.name, getConfig: () => [input.type, {}] }; //fake widget + widget = { name: input.name, [GET_CONFIG]: () => [input.type, {}] }; //fake widget } else { widget = input.widget; } - const { type } = getWidgetType(widget.getConfig()); + const { type } = getWidgetType(widget[GET_CONFIG]()); // Update our output to restrict to the widget type this.outputs[0].type = type; this.outputs[0].name = type; this.outputs[0].widget = widget; - this.#createWidget(widget[CONFIG] ?? widget.getConfig(), theirNode, widget.name, recreating); + this.#createWidget(widget[CONFIG] ?? widget[GET_CONFIG](), theirNode, widget.name, recreating); } #createWidget(inputData, node, widgetName, recreating) { @@ -469,7 +470,7 @@ app.registerExtension({ return; } - const config1 = output.widget.getConfig(); + const config1 = output.widget[GET_CONFIG](); const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; if (!isNumber) return; @@ -488,8 +489,8 @@ app.registerExtension({ #isValidConnection(input, forceUpdate) { // Only allow connections where the configs match const output = this.outputs[0]; - const config1 = output.widget[CONFIG] ?? output.widget.getConfig(); - const config2 = input.widget.getConfig(); + const config1 = output.widget[CONFIG] ?? output.widget[GET_CONFIG](); + const config2 = input.widget[GET_CONFIG](); if (config1[0] instanceof Array) { // New input isnt a combo From 0986cc7c382c3561a4934b53383732ddae1cde80 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 7 Oct 2023 11:57:32 -0400 Subject: [PATCH 062/420] Fix issues with the packaging. 
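Two quirks in the generated batch script: inside the double-quoted echo string
the trailing backslash in ..\ComfyUI\ escaped the newline and merged the line
with the next one, so it is doubled to ..\ComfyUI\\, and a bare echo in a .bat
file prints the command-echo state ("ECHO is off.") instead of a blank line, so
the spacer lines are written as echo - to emit a visible separator.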
--- .github/workflows/windows_release_dependencies.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/windows_release_dependencies.yml b/.github/workflows/windows_release_dependencies.yml index f2ac940746c..aafe8a21444 100644 --- a/.github/workflows/windows_release_dependencies.yml +++ b/.github/workflows/windows_release_dependencies.yml @@ -41,13 +41,13 @@ jobs: - shell: bash run: | echo "@echo off - ..\python_embeded\python.exe .\update.py ..\ComfyUI\ - echo + ..\python_embeded\python.exe .\update.py ..\ComfyUI\\ + echo - echo This will try to update pytorch and all python dependencies, if you get an error wait for pytorch/xformers to fix their stuff echo You should not be running this anyways unless you really have to - echo + echo - echo If you just want to update normally, close this and run update_comfyui.bat instead. - echo + echo - pause ..\python_embeded\python.exe -s -m pip install --upgrade torch torchvision torchaudio ${{ inputs.xformers }} --extra-index-url https://download.pytorch.org/whl/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2 pause" > update_comfyui_and_python_dependencies.bat From 1c5d6663faf1a33e00ec67240167b174a9cac655 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 7 Oct 2023 16:13:35 -0400 Subject: [PATCH 063/420] Update standalone download link. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 559e99ffa4d..925caa73237 100644 --- a/README.md +++ b/README.md @@ -69,7 +69,7 @@ Ctrl can also be replaced with Cmd instead for macOS users There is a portable standalone build for Windows that should work for running on Nvidia GPUs or for running on your CPU only on the [releases page](https://github.com/comfyanonymous/ComfyUI/releases). -### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_cu118_or_cpu.7z) +### [Direct link to download](https://github.com/comfyanonymous/ComfyUI/releases/download/latest/ComfyUI_windows_portable_nvidia_cu121_or_cpu.7z) Simply download, extract with [7-Zip](https://7-zip.org) and run. 
Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints From a0b1d4f21d5449ca9eb480576637a8e763cfa434 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Sun, 8 Oct 2023 16:00:33 +0900 Subject: [PATCH 064/420] improve: image preview (#1683) * improve image preview - grid mode: align in rectangle instead of first image, show cell border - individual mode: proper ratio handling * improve: fix preview button position instead of relative * improve: image preview - compact mode for same aspect ratio --- web/scripts/app.js | 125 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 96 insertions(+), 29 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 5b9e7658069..7698d0f1173 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -450,6 +450,47 @@ export class ComfyApp { } } + function calculateGrid(w, h, n) { + let columns, rows, cellsize; + + if (w > h) { + cellsize = h; + columns = Math.ceil(w / cellsize); + rows = Math.ceil(n / columns); + } else { + cellsize = w; + rows = Math.ceil(h / cellsize); + columns = Math.ceil(n / rows); + } + + while (columns * rows < n) { + cellsize++; + if (w >= h) { + columns = Math.ceil(w / cellsize); + rows = Math.ceil(n / columns); + } else { + rows = Math.ceil(h / cellsize); + columns = Math.ceil(n / rows); + } + } + + const cell_size = Math.min(w/columns, h/rows); + return {cell_size, columns, rows}; + } + + function is_all_same_aspect_ratio(imgs) { + // assume: imgs.length >= 2 + let ratio = imgs[0].naturalWidth/imgs[0].naturalHeight; + + for(let i=1; i best) { - best = area; - cellWidth = imageW; - cellHeight = imageH; - cols = c; - shiftX = c * ((cW - imageW) / 2); + var cellWidth, cellHeight, shiftX, cell_padding, cols; + + const compact_mode = is_all_same_aspect_ratio(this.imgs); + if(!compact_mode) { + // use rectangle cell style and border line + cell_padding = 2; + const { cell_size, columns, rows } = calculateGrid(dw, dh, numImages); + cols = columns; + + cellWidth = cell_size; + cellHeight = cell_size; + shiftX = (dw-cell_size*cols)/2; + shiftY = (dh-cell_size*rows)/2 + top; + } + else { + cell_padding = 0; + let best = 0; + let w = this.imgs[0].naturalWidth; + let h = this.imgs[0].naturalHeight; + + // compact style + for (let c = 1; c <= numImages; c++) { + const rows = Math.ceil(numImages / c); + const cW = dw / c; + const cH = dh / rows; + const scaleX = cW / w; + const scaleY = cH / h; + + const scale = Math.min(scaleX, scaleY, 1); + const imageW = w * scale; + const imageH = h * scale; + const area = imageW * imageH * numImages; + + if (area > best) { + best = area; + cellWidth = imageW; + cellHeight = imageH; + cols = c; + shiftX = c * ((cW - imageW) / 2); + } } } @@ -542,7 +599,14 @@ export class ComfyApp { let imgWidth = ratio * img.width; let imgX = col * cellWidth + shiftX + (cellWidth - imgWidth)/2; - ctx.drawImage(img, imgX, imgY, imgWidth, imgHeight); + ctx.drawImage(img, imgX+cell_padding, imgY+cell_padding, imgWidth-cell_padding*2, imgHeight-cell_padding*2); + if(!compact_mode) { + // rectangle cell and border line style + ctx.strokeStyle = "#8F8F8F"; + ctx.lineWidth = 1; + ctx.strokeRect(x+cell_padding, y+cell_padding, cellWidth-cell_padding*2, cellHeight-cell_padding*2); + } + ctx.filter = "none"; } @@ -552,6 +616,9 @@ export class ComfyApp { } } else { // Draw individual + let w = this.imgs[imageIndex].naturalWidth; + let h = this.imgs[imageIndex].naturalHeight; + const scaleX = dw / 
w; const scaleY = dh / h; const scale = Math.min(scaleX, scaleY, 1); @@ -594,14 +661,14 @@ export class ComfyApp { }; if (numImages > 1) { - if (drawButton(x + w - 35, y + h - 35, 30, `${this.imageIndex + 1}/${numImages}`)) { + if (drawButton(dw - 40, dh + top - 40, 30, `${this.imageIndex + 1}/${numImages}`)) { let i = this.imageIndex + 1 >= numImages ? 0 : this.imageIndex + 1; if (!this.pointerDown || !this.pointerDown.index === i) { this.pointerDown = { index: i, pos: [...mouse] }; } } - if (drawButton(x + w - 35, y + 5, 30, `x`)) { + if (drawButton(dw - 40, top + 10, 30, `x`)) { if (!this.pointerDown || !this.pointerDown.index === null) { this.pointerDown = { index: null, pos: [...mouse] }; } From 69a824e9a458a72225ea9e2e2874815bf0052f78 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 8 Oct 2023 03:20:35 -0400 Subject: [PATCH 065/420] Move _for_testing/custom_sampling nodes to sampling/custom_sampling. --- comfy_extras/nodes_custom_sampler.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 42a1fd6ba03..9391c714747 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -15,7 +15,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sigmas" @@ -35,7 +35,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sigmas" @@ -53,7 +53,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sigmas" @@ -72,7 +72,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sigmas" @@ -91,7 +91,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sigmas" @@ -108,7 +108,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS","SIGMAS") - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sigmas" @@ -125,7 +125,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sampler" @@ -144,7 +144,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sampler" @@ -168,7 +168,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" FUNCTION = "get_sampler" @@ -201,7 +201,7 @@ def INPUT_TYPES(s): FUNCTION = "sample" - CATEGORY = "_for_testing/custom_sampling" + CATEGORY = "sampling/custom_sampling" def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, sigmas, latent_image): latent = latent_image From 1f2f4eaa6f04660a7df7d71eede3118d09b1c2c2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 8 Oct 2023 04:04:25 -0400 Subject: [PATCH 066/420] Fix bug when copying node with converted input. 
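Pasting a copy of a node that had a widget converted to an input restored the
GET_CONFIG binding but left the underlying widget visible, so the pasted node
showed both the input slot and the original widget. The widget is now hidden
again on paste, the same way configure() handles serialized workflows.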
--- web/extensions/core/widgetInputs.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 3c9da458d6a..ce05a29e9ed 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -200,6 +200,10 @@ app.registerExtension({ for (const input of this.inputs) { if (input.widget && !input.widget[GET_CONFIG]) { input.widget[GET_CONFIG] = () => getConfig.call(this, input.widget.name); + const w = this.widgets.find((w) => w.name === input.widget.name); + if (w) { + hideWidget(this, w); + } } } } From c16f5744e306fb042363767d771da68929f088d8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 8 Oct 2023 15:52:10 -0400 Subject: [PATCH 067/420] Fix SplitImageWithAlpha and JoinImageWithAlpha. --- comfy_extras/nodes_compositing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_compositing.py b/comfy_extras/nodes_compositing.py index 68bfce11110..181b36ed68e 100644 --- a/comfy_extras/nodes_compositing.py +++ b/comfy_extras/nodes_compositing.py @@ -158,7 +158,7 @@ def INPUT_TYPES(s): def split_image_with_alpha(self, image: torch.Tensor): out_images = [i[:,:,:3] for i in image] out_alphas = [i[:,:,3] if i.shape[2] > 3 else torch.ones_like(i[:,:,0]) for i in image] - result = (torch.stack(out_images), torch.stack(out_alphas)) + result = (torch.stack(out_images), 1.0 - torch.stack(out_alphas)) return result @@ -180,7 +180,7 @@ def join_image_with_alpha(self, image: torch.Tensor, alpha: torch.Tensor): batch_size = min(len(image), len(alpha)) out_images = [] - alpha = resize_mask(alpha, image.shape[1:]) + alpha = 1.0 - resize_mask(alpha, image.shape[1:]) for i in range(batch_size): out_images.append(torch.cat((image[i][:,:,:3], alpha[i].unsqueeze(2)), dim=2)) From 7bb9f6b7e87d533cbbbc7aceafafb744a62dbfa8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 9 Oct 2023 01:42:15 -0400 Subject: [PATCH 068/420] Add a VAESave node. 
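The node writes the raw VAE state dict to output/vae/ with the usual prompt
metadata. Roughly what the save boils down to (assuming vae is an already
loaded comfy.sd.VAE; the output path below follows the counter naming and is
only illustrative):

    import comfy.utils
    state_dict = vae.get_sd()  # plain state dict of the first-stage model
    comfy.utils.save_torch_file(state_dict,
                                "output/vae/ComfyUI_vae_00001_.safetensors",
                                metadata={"prompt": "{}"})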
--- comfy_extras/nodes_model_merging.py | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py index 3d42d78067c..1e3fc935909 100644 --- a/comfy_extras/nodes_model_merging.py +++ b/comfy_extras/nodes_model_merging.py @@ -1,6 +1,7 @@ import comfy.sd import comfy.utils import comfy.model_base +import comfy.model_management import folder_paths import json @@ -178,6 +179,39 @@ def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=Non comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, metadata=metadata) return {} +class VAESave: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + + @classmethod + def INPUT_TYPES(s): + return {"required": { "vae": ("VAE",), + "filename_prefix": ("STRING", {"default": "vae/ComfyUI_vae"}),}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},} + RETURN_TYPES = () + FUNCTION = "save" + OUTPUT_NODE = True + + CATEGORY = "advanced/model_merging" + + def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None): + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) + prompt_info = "" + if prompt is not None: + prompt_info = json.dumps(prompt) + + metadata = {} + if not args.disable_metadata: + metadata["prompt"] = prompt_info + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata[x] = json.dumps(extra_pnginfo[x]) + + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + comfy.utils.save_torch_file(vae.get_sd(), output_checkpoint, metadata=metadata) + return {} NODE_CLASS_MAPPINGS = { "ModelMergeSimple": ModelMergeSimple, @@ -186,4 +220,5 @@ def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=Non "ModelMergeAdd": ModelAdd, "CheckpointSave": CheckpointSave, "CLIPMergeSimple": CLIPMergeSimple, + "VAESave": VAESave, } From 4308862ce0434718a6f3acdc7164f3a1436220e7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 9 Oct 2023 01:51:01 -0400 Subject: [PATCH 069/420] Add a note to README about pytorch 3.12 not being supported. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 925caa73237..6bef25cee0b 100644 --- a/README.md +++ b/README.md @@ -89,6 +89,8 @@ Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints Put your VAE in: models/vae +Note: pytorch does not support python 3.12 yet so make sure your python version is 3.11 or earlier. 
+ ### AMD GPUs (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: From 9eb621c95a7c867d118e90348057474cbc96c20c Mon Sep 17 00:00:00 2001 From: Yukimasa Funaoka Date: Tue, 10 Oct 2023 13:21:44 +0900 Subject: [PATCH 070/420] Supports TAESD models in safetensors format --- comfy/latent_formats.py | 4 ++-- comfy/taesd/taesd.py | 12 ++++++++++-- latent_preview.py | 7 ++++++- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index fadc0eec752..c209087e0cc 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -20,7 +20,7 @@ def __init__(self, scale_factor=0.18215): [-0.2829, 0.1762, 0.2721], [-0.2120, -0.2616, -0.7177] ] - self.taesd_decoder_name = "taesd_decoder.pth" + self.taesd_decoder_name = "taesd_decoder" class SDXL(LatentFormat): def __init__(self): @@ -32,4 +32,4 @@ def __init__(self): [ 0.0568, 0.1687, -0.0755], [-0.3112, -0.2359, -0.2076] ] - self.taesd_decoder_name = "taesdxl_decoder.pth" + self.taesd_decoder_name = "taesdxl_decoder" diff --git a/comfy/taesd/taesd.py b/comfy/taesd/taesd.py index 1549345ae53..92f74c11ad7 100644 --- a/comfy/taesd/taesd.py +++ b/comfy/taesd/taesd.py @@ -50,9 +50,17 @@ def __init__(self, encoder_path="taesd_encoder.pth", decoder_path="taesd_decoder self.encoder = Encoder() self.decoder = Decoder() if encoder_path is not None: - self.encoder.load_state_dict(torch.load(encoder_path, map_location="cpu", weights_only=True)) + if encoder_path.lower().endswith(".safetensors"): + import safetensors.torch + self.encoder.load_state_dict(safetensors.torch.load_file(encoder_path, device="cpu")) + else: + self.encoder.load_state_dict(torch.load(encoder_path, map_location="cpu", weights_only=True)) if decoder_path is not None: - self.decoder.load_state_dict(torch.load(decoder_path, map_location="cpu", weights_only=True)) + if decoder_path.lower().endswith(".safetensors"): + import safetensors.torch + self.decoder.load_state_dict(safetensors.torch.load_file(decoder_path, device="cpu")) + else: + self.decoder.load_state_dict(torch.load(decoder_path, map_location="cpu", weights_only=True)) @staticmethod def scale_latents(x): diff --git a/latent_preview.py b/latent_preview.py index 740e0860776..e1553c85cac 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -56,7 +56,12 @@ def get_previewer(device, latent_format): # TODO previewer methods taesd_decoder_path = None if latent_format.taesd_decoder_name is not None: - taesd_decoder_path = folder_paths.get_full_path("vae_approx", latent_format.taesd_decoder_name) + taesd_decoder_path = next( + (fn for fn in folder_paths.get_filename_list("vae_approx") + if fn.startswith(latent_format.taesd_decoder_name)), + "" + ) + taesd_decoder_path = folder_paths.get_full_path("vae_approx", taesd_decoder_path) if method == LatentPreviewMethod.Auto: method = LatentPreviewMethod.Latent2RGB From 877553843f914d9b18d470c285f0d745a4a85c06 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 10 Oct 2023 01:24:49 -0400 Subject: [PATCH 071/420] Add a CLIPSave node to save CLIP model weights. 
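SDXL checkpoints carry two text encoders, so the node splits the combined CLIP state dict by key prefix and writes one file per group. A simplified sketch of that partitioning (split_clip_sd and the toy keys are illustrative only):

    def split_clip_sd(clip_sd):
        # Pop keys in prefix order so the final "" prefix only collects
        # whatever the clip_l. / clip_g. groups did not already claim.
        groups = {}
        for prefix in ["clip_l.", "clip_g.", ""]:
            keys = [k for k in clip_sd.keys() if k.startswith(prefix)]
            group = {k: clip_sd.pop(k) for k in keys}
            if len(group) > 0:
                groups[prefix] = group
        return groups

    sd = {"clip_l.w": 1, "clip_g.w": 2, "logit_scale": 3}
    groups = split_clip_sd(sd)
    assert sorted(groups) == ["", "clip_g.", "clip_l."]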
--- comfy_extras/nodes_model_merging.py | 57 +++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py index 1e3fc935909..dad1dd6378d 100644 --- a/comfy_extras/nodes_model_merging.py +++ b/comfy_extras/nodes_model_merging.py @@ -179,6 +179,62 @@ def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=Non comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, metadata=metadata) return {} +class CLIPSave: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip": ("CLIP",), + "filename_prefix": ("STRING", {"default": "clip/ComfyUI"}),}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},} + RETURN_TYPES = () + FUNCTION = "save" + OUTPUT_NODE = True + + CATEGORY = "advanced/model_merging" + + def save(self, clip, filename_prefix, prompt=None, extra_pnginfo=None): + prompt_info = "" + if prompt is not None: + prompt_info = json.dumps(prompt) + + metadata = {} + if not args.disable_metadata: + metadata["prompt"] = prompt_info + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata[x] = json.dumps(extra_pnginfo[x]) + + comfy.model_management.load_models_gpu([clip.load_model()]) + clip_sd = clip.get_sd() + + for prefix in ["clip_l.", "clip_g.", ""]: + k = list(filter(lambda a: a.startswith(prefix), clip_sd.keys())) + current_clip_sd = {} + for x in k: + current_clip_sd[x] = clip_sd.pop(x) + if len(current_clip_sd) == 0: + continue + + p = prefix[:-1] + replace_prefix = {} + filename_prefix_ = filename_prefix + if len(p) > 0: + filename_prefix_ = "{}_{}".format(filename_prefix_, p) + replace_prefix[prefix] = "" + replace_prefix["transformer."] = "" + + full_output_folder, filename, counter, subfolder, filename_prefix_ = folder_paths.get_save_image_path(filename_prefix_, self.output_dir) + + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + current_clip_sd = comfy.utils.state_dict_prefix_replace(current_clip_sd, replace_prefix) + + comfy.utils.save_torch_file(current_clip_sd, output_checkpoint, metadata=metadata) + return {} + class VAESave: def __init__(self): self.output_dir = folder_paths.get_output_directory() @@ -220,5 +276,6 @@ def save(self, vae, filename_prefix, prompt=None, extra_pnginfo=None): "ModelMergeAdd": ModelAdd, "CheckpointSave": CheckpointSave, "CLIPMergeSimple": CLIPMergeSimple, + "CLIPSave": CLIPSave, "VAESave": VAESave, } From be903eb2e2921f03a3a03dc9d6b0c6437ae201f5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 10 Oct 2023 01:25:47 -0400 Subject: [PATCH 072/420] Add default CheckpointSave, CLIPSave and VAESave paths to model paths. --- main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/main.py b/main.py index 875ea1aa908..1100a07f42a 100644 --- a/main.py +++ b/main.py @@ -175,6 +175,11 @@ def load_extra_path_config(yaml_path): print(f"Setting output directory to: {output_dir}") folder_paths.set_output_directory(output_dir) + #These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. 
nodes + folder_paths.add_model_folder_path("checkpoints", os.path.join(folder_paths.get_output_directory(), "checkpoints")) + folder_paths.add_model_folder_path("clip", os.path.join(folder_paths.get_output_directory(), "clip")) + folder_paths.add_model_folder_path("vae", os.path.join(folder_paths.get_output_directory(), "vae")) + if args.input_directory: input_dir = os.path.abspath(args.input_directory) print(f"Setting input directory to: {input_dir}") From 5e885bd9c822a3cc7d50d75a40958265f497cd03 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 10 Oct 2023 21:46:53 -0400 Subject: [PATCH 073/420] Cleanup. --- comfy/taesd/taesd.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/comfy/taesd/taesd.py b/comfy/taesd/taesd.py index 92f74c11ad7..8df1f160915 100644 --- a/comfy/taesd/taesd.py +++ b/comfy/taesd/taesd.py @@ -6,6 +6,8 @@ import torch import torch.nn as nn +import comfy.utils + def conv(n_in, n_out, **kwargs): return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs) @@ -50,17 +52,9 @@ def __init__(self, encoder_path="taesd_encoder.pth", decoder_path="taesd_decoder self.encoder = Encoder() self.decoder = Decoder() if encoder_path is not None: - if encoder_path.lower().endswith(".safetensors"): - import safetensors.torch - self.encoder.load_state_dict(safetensors.torch.load_file(encoder_path, device="cpu")) - else: - self.encoder.load_state_dict(torch.load(encoder_path, map_location="cpu", weights_only=True)) + self.encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True)) if decoder_path is not None: - if decoder_path.lower().endswith(".safetensors"): - import safetensors.torch - self.decoder.load_state_dict(safetensors.torch.load_file(decoder_path, device="cpu")) - else: - self.decoder.load_state_dict(torch.load(decoder_path, map_location="cpu", weights_only=True)) + self.decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True)) @staticmethod def scale_latents(x): From 8cc75c64ff7188ce72cd4ba595119586e425c09f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 11 Oct 2023 01:34:38 -0400 Subject: [PATCH 074/420] Let unet wrapper functions have .to attributes. --- comfy/model_patcher.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index ba505221e77..50b725b8611 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -107,6 +107,10 @@ def model_patches_to(self, device): for k in patch_list: if hasattr(patch_list[k], "to"): patch_list[k] = patch_list[k].to(device) + if "unet_wrapper_function" in self.model_options: + wrap_func = self.model_options["unet_wrapper_function"] + if hasattr(wrap_func, "to"): + self.model_options["unet_wrapper_function"] = wrap_func.to(device) def model_dtype(self): if hasattr(self.model, "get_dtype"): From 1a4bd9e9a6fc2b364ebb547dbd80736548cf9f5c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 11 Oct 2023 15:47:53 -0400 Subject: [PATCH 075/420] Refactor the attention functions. There's no reason for the whole CrossAttention object to be repeated when only the operation in the middle changes. 
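Every backend now implements one functional contract: q, k, v arrive as (batch, tokens, heads * dim_head) tensors and the result comes back in the same layout, so a single optimized_attention callable can be selected once at import time. A naive reference implementation of that contract, for illustration only (it is not one of the optimized paths below, and it assumes an additive float mask):

    import torch

    def attention_reference(q, k, v, heads, mask=None):
        # q, k, v: (batch, tokens, heads * dim_head)
        b, _, inner = q.shape
        dim_head = inner // heads
        # split out the heads: (batch, heads, tokens, dim_head)
        q, k, v = map(lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2), (q, k, v))
        sim = q @ k.transpose(-2, -1) * dim_head ** -0.5
        if mask is not None:
            sim = sim + mask
        out = sim.softmax(dim=-1) @ v
        # merge the heads back: (batch, tokens, heads * dim_head)
        return out.transpose(1, 2).reshape(b, -1, heads * dim_head)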
--- comfy/ldm/modules/attention.py | 568 +++++++------------- comfy/ldm/modules/diffusionmodules/model.py | 13 - 2 files changed, 201 insertions(+), 380 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index fcae6b66a79..3230cfaf500 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -94,360 +94,222 @@ def zero_module(module): def Normalize(in_channels, dtype=None, device=None): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) +def attention_basic(q, k, v, heads, mask=None): + h = heads + scale = (q.shape[-1] // heads) ** -0.5 + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + # force cast to fp32 to avoid overflowing + if _ATTN_PRECISION =="fp32": + with torch.autocast(enabled=False, device_type = 'cuda'): + q, k = q.float(), k.float() + sim = einsum('b i d, b j d -> b i j', q, k) * scale + else: + sim = einsum('b i d, b j d -> b i j', q, k) * scale -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttentionBirchSan(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - - self.to_out = nn.Sequential( - operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, value=None, mask=None): - h = self.heads - - query = self.to_q(x) - context = default(context, x) - key = self.to_k(context) - if value is not None: - value = self.to_v(value) - else: - value = self.to_v(context) - - del context, x - - query = query.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1) - key_t = key.transpose(1,2).unflatten(1, (self.heads, -1)).flatten(end_dim=1) - del key - value = value.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1) - - dtype = query.dtype - upcast_attention = _ATTN_PRECISION =="fp32" and 
query.dtype != torch.float32 - if upcast_attention: - bytes_per_token = torch.finfo(torch.float32).bits//8 - else: - bytes_per_token = torch.finfo(query.dtype).bits//8 - batch_x_heads, q_tokens, _ = query.shape - _, _, k_tokens = key_t.shape - qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens - - mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True) - - chunk_threshold_bytes = mem_free_torch * 0.5 #Using only this seems to work better on AMD - - kv_chunk_size_min = None - - #not sure at all about the math here - #TODO: tweak this - if mem_free_total > 8192 * 1024 * 1024 * 1.3: - query_chunk_size_x = 1024 * 4 - elif mem_free_total > 4096 * 1024 * 1024 * 1.3: - query_chunk_size_x = 1024 * 2 - else: - query_chunk_size_x = 1024 - kv_chunk_size_min_x = None - kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024 - if kv_chunk_size_x < 1024: - kv_chunk_size_x = None - - if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: - # the big matmul fits into our memory limit; do everything in 1 chunk, - # i.e. send it down the unchunked fast-path - query_chunk_size = q_tokens - kv_chunk_size = k_tokens - else: - query_chunk_size = query_chunk_size_x - kv_chunk_size = kv_chunk_size_x - kv_chunk_size_min = kv_chunk_size_min_x - - hidden_states = efficient_dot_product_attention( - query, - key_t, - value, - query_chunk_size=query_chunk_size, - kv_chunk_size=kv_chunk_size, - kv_chunk_size_min=kv_chunk_size_min, - use_checkpoint=self.training, - upcast_attention=upcast_attention, - ) - - hidden_states = hidden_states.to(dtype) + del q, k - hidden_states = hidden_states.unflatten(0, (-1, self.heads)).transpose(1,2).flatten(start_dim=2) + if exists(mask): + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) - out_proj, dropout = self.to_out - hidden_states = out_proj(hidden_states) - hidden_states = dropout(hidden_states) + # attention, what we cannot get enough of + sim = sim.softmax(dim=-1) - return hidden_states + out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return out -class CrossAttentionDoggettx(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) +def attention_sub_quad(query, key, value, heads, mask=None): + scale = (query.shape[-1] // heads) ** -0.5 + query = query.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1) + key_t = key.transpose(1,2).unflatten(1, (heads, -1)).flatten(end_dim=1) + del key + value = value.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1) - self.scale = dim_head ** -0.5 - self.heads = heads + dtype = query.dtype + upcast_attention = _ATTN_PRECISION =="fp32" and query.dtype != torch.float32 + if upcast_attention: + bytes_per_token = torch.finfo(torch.float32).bits//8 + else: + bytes_per_token = torch.finfo(query.dtype).bits//8 + batch_x_heads, q_tokens, _ = query.shape + _, _, k_tokens = key_t.shape + qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens - self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) + mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True) - self.to_out = nn.Sequential( - operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), - nn.Dropout(dropout) - ) + chunk_threshold_bytes = mem_free_torch * 0.5 #Using only this seems to work better on AMD - def forward(self, x, context=None, value=None, mask=None): - h = self.heads + kv_chunk_size_min = None - q_in = self.to_q(x) - context = default(context, x) - k_in = self.to_k(context) - if value is not None: - v_in = self.to_v(value) - del value - else: - v_in = self.to_v(context) - del context, x - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) - del q_in, k_in, v_in - - r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - - mem_free_total = model_management.get_free_memory(q.device) - - gb = 1024 ** 3 - tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() - modifier = 3 if q.element_size() == 2 else 2.5 - mem_required = tensor_size * modifier - steps = 1 - - - if mem_required > mem_free_total: - steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) - # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " - # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") - - if steps > 64: - max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 - raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' - f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free') - - # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size) - first_op_done = False - cleared_cache = False - while True: - try: - slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] - for i in range(0, q.shape[1], slice_size): - end = i + slice_size - if _ATTN_PRECISION =="fp32": - with torch.autocast(enabled=False, device_type = 'cuda'): - s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * self.scale - else: - s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale - first_op_done = True - - s2 = s1.softmax(dim=-1).to(v.dtype) - del s1 - - r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) - del s2 - break - except model_management.OOM_EXCEPTION as e: - if first_op_done == False: - model_management.soft_empty_cache(True) - if cleared_cache == False: - cleared_cache = True - print("out of memory error, emptying cache and trying again") - continue - steps *= 2 - if steps > 64: - raise e - print("out of memory error, increasing steps and trying again", steps) + #not sure at all about the math here + #TODO: tweak this + if mem_free_total > 8192 * 1024 * 1024 * 1.3: + query_chunk_size_x = 1024 * 4 + elif mem_free_total > 4096 * 1024 * 1024 * 1.3: + query_chunk_size_x = 1024 * 2 + else: + query_chunk_size_x = 1024 + kv_chunk_size_min_x = None + kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024 + if kv_chunk_size_x < 1024: + kv_chunk_size_x = None + + if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: + # the big matmul fits into our memory limit; do everything in 1 chunk, + # i.e. send it down the unchunked fast-path + query_chunk_size = q_tokens + kv_chunk_size = k_tokens + else: + query_chunk_size = query_chunk_size_x + kv_chunk_size = kv_chunk_size_x + kv_chunk_size_min = kv_chunk_size_min_x + + hidden_states = efficient_dot_product_attention( + query, + key_t, + value, + query_chunk_size=query_chunk_size, + kv_chunk_size=kv_chunk_size, + kv_chunk_size_min=kv_chunk_size_min, + use_checkpoint=False, + upcast_attention=upcast_attention, + ) + + hidden_states = hidden_states.to(dtype) + + hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1,2).flatten(start_dim=2) + return hidden_states + +def attention_split(q, k, v, heads, mask=None): + scale = (q.shape[-1] // heads) ** -0.5 + h = heads + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + + mem_free_total = model_management.get_free_memory(q.device) + + gb = 1024 ** 3 + tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() + modifier = 3 if q.element_size() == 2 else 2.5 + mem_required = tensor_size * modifier + steps = 1 + + + if mem_required > mem_free_total: + steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) + # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " + # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") + + if steps > 64: + max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 + raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' + f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free') + + # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size) + first_op_done = False + cleared_cache = False + while True: + try: + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + if _ATTN_PRECISION =="fp32": + with torch.autocast(enabled=False, device_type = 'cuda'): + s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale else: + s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale + first_op_done = True + + s2 = s1.softmax(dim=-1).to(v.dtype) + del s1 + + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) + del s2 + break + except model_management.OOM_EXCEPTION as e: + if first_op_done == False: + model_management.soft_empty_cache(True) + if cleared_cache == False: + cleared_cache = True + print("out of memory error, emptying cache and trying again") + continue + steps *= 2 + if steps > 64: raise e + print("out of memory error, increasing steps and trying again", steps) + else: + raise e + + del q, k, v + + r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) + del r1 + return r2 + +def attention_xformers(q, k, v, heads, mask=None): + b, _, _ = q.shape + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, t.shape[1], heads, -1) + .permute(0, 2, 1, 3) + .reshape(b * heads, t.shape[1], -1) + .contiguous(), + (q, k, v), + ) + + # actually compute the attention, what we cannot get enough of + out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) + + if exists(mask): + raise NotImplementedError + out = ( + out.unsqueeze(0) + .reshape(b, heads, out.shape[1], -1) + .permute(0, 2, 1, 3) + .reshape(b, out.shape[1], -1) + ) + return out + +def attention_pytorch(q, k, v, heads, mask=None): + b, _, dim_head = q.shape + dim_head //= heads + q, k, v = map( + lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2), + (q, k, v), + ) + + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) + + if exists(mask): + raise NotImplementedError + out = ( + out.transpose(1, 2).reshape(b, -1, heads * dim_head) + ) + return out + +optimized_attention = attention_basic - del q, k, v - - r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) - del r1 - - return self.to_out(r2) +if model_management.xformers_enabled(): + print("Using xformers cross attention") + optimized_attention = attention_xformers +elif model_management.pytorch_attention_enabled(): + print("Using pytorch cross attention") + optimized_attention = attention_pytorch +else: + if args.use_split_cross_attention: + print("Using split optimization for cross attention") + optimized_attention = attention_split + else: + print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") + optimized_attention = attention_sub_quad class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = 
operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - - self.to_out = nn.Sequential( - operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, value=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - if value is not None: - v = self.to_v(value) - del value - else: - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - # force cast to fp32 to avoid overflowing - if _ATTN_PRECISION =="fp32": - with torch.autocast(enabled=False, device_type = 'cuda'): - q, k = q.float(), k.float() - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - else: - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - del q, k - - if exists(mask): - mask = rearrange(mask, 'b ... -> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - sim = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', sim, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - -class MemoryEfficientCrossAttention(nn.Module): - # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223 - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None, operations=comfy.ops): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.heads = heads - self.dim_head = dim_head - - self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) - - self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) - self.attention_op: Optional[Any] = None - - def forward(self, x, context=None, value=None, mask=None): - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - if value is not None: - v = self.to_v(value) - del value - else: - v = self.to_v(context) - - b, _, _ = q.shape - q, k, v = map( - lambda t: t.unsqueeze(3) - .reshape(b, t.shape[1], self.heads, self.dim_head) - .permute(0, 2, 1, 3) - .reshape(b * self.heads, t.shape[1], self.dim_head) - .contiguous(), - (q, k, v), - ) - - # actually compute the attention, what we cannot get enough of - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op) - - if exists(mask): - raise NotImplementedError - out = ( - out.unsqueeze(0) - .reshape(b, self.heads, out.shape[1], self.dim_head) - .permute(0, 2, 1, 3) - .reshape(b, out.shape[1], self.heads * self.dim_head) - ) - return self.to_out(out) - -class CrossAttentionPytorch(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() inner_dim = dim_head * heads @@ -461,7 +323,6 @@ def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0. 
self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device) self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout)) - self.attention_op: Optional[Any] = None def forward(self, x, context=None, value=None, mask=None): q = self.to_q(x) @@ -473,36 +334,9 @@ def forward(self, x, context=None, value=None, mask=None): else: v = self.to_v(context) - b, _, _ = q.shape - q, k, v = map( - lambda t: t.view(b, -1, self.heads, self.dim_head).transpose(1, 2), - (q, k, v), - ) - - out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) - - if exists(mask): - raise NotImplementedError - out = ( - out.transpose(1, 2).reshape(b, -1, self.heads * self.dim_head) - ) - + out = optimized_attention(q, k, v, self.heads, mask) return self.to_out(out) -if model_management.xformers_enabled(): - print("Using xformers cross attention") - CrossAttention = MemoryEfficientCrossAttention -elif model_management.pytorch_attention_enabled(): - print("Using pytorch cross attention") - CrossAttention = CrossAttentionPytorch -else: - if args.use_split_cross_attention: - print("Using split optimization for cross attention") - CrossAttention = CrossAttentionDoggettx - else: - print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") - CrossAttention = CrossAttentionBirchSan - class BasicTransformerBlock(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 5f38640c3d8..e6cf954ff5c 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -6,7 +6,6 @@ from einops import rearrange from typing import Optional, Any -from ..attention import MemoryEfficientCrossAttention from comfy import model_management import comfy.ops @@ -352,15 +351,6 @@ def forward(self, x): out = self.proj_out(out) return x+out -class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention): - def forward(self, x, context=None, mask=None): - b, c, h, w = x.shape - x = rearrange(x, 'b c h w -> b (h w) c') - out = super().forward(x, context=context, mask=mask) - out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c) - return x + out - - def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' if model_management.xformers_enabled_vae() and attn_type == "vanilla": @@ -376,9 +366,6 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): return MemoryEfficientAttnBlock(in_channels) elif attn_type == "vanilla-pytorch": return MemoryEfficientAttnBlockPytorch(in_channels) - elif type == "memory-efficient-cross-attn": - attn_kwargs["query_dim"] = in_channels - return MemoryEfficientCrossAttentionWrapper(**attn_kwargs) elif attn_type == "none": return nn.Identity(in_channels) else: From ac7d8cfa875e08623993da8109cc73a68df42379 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 11 Oct 2023 20:24:17 -0400 Subject: [PATCH 076/420] Allow attn_mask in attention_pytorch. 
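For reference, torch.nn.functional.scaled_dot_product_attention accepts attn_mask either as a boolean mask (True marks positions that may attend) or as an additive float bias; a small standalone example of the boolean form:

    import torch

    q = torch.randn(2, 8, 77, 64)  # (batch, heads, tokens, dim_head)
    k = torch.randn(2, 8, 77, 64)
    v = torch.randn(2, 8, 77, 64)
    causal = torch.ones(77, 77, dtype=torch.bool).tril()
    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=causal)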
--- comfy/ldm/modules/attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 3230cfaf500..ac0d9c8c5df 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -284,7 +284,7 @@ def attention_pytorch(q, k, v, heads, mask=None): (q, k, v), ) - out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) if exists(mask): raise NotImplementedError From 20d3852aa1a23db24b288531ef07c26b49308629 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 11 Oct 2023 20:35:50 -0400 Subject: [PATCH 077/420] Pull some small changes from the other repo. --- comfy/model_management.py | 5 +++-- comfy/utils.py | 4 ++++ comfy_extras/nodes_custom_sampler.py | 3 ++- execution.py | 19 ++++++++++--------- folder_paths.py | 2 ++ nodes.py | 2 +- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 8b896372687..3b43b21acd3 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -354,6 +354,8 @@ def load_models_gpu(models, memory_required=0): current_loaded_models.insert(0, current_loaded_models.pop(index)) models_already_loaded.append(loaded_model) else: + if hasattr(x, "model"): + print(f"Requested to load {x.model.__class__.__name__}") models_to_load.append(loaded_model) if len(models_to_load) == 0: @@ -363,7 +365,7 @@ def load_models_gpu(models, memory_required=0): free_memory(extra_mem, d, models_already_loaded) return - print("loading new") + print(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}") total_memory_required = {} for loaded_model in models_to_load: @@ -405,7 +407,6 @@ def load_model_gpu(model): def cleanup_models(): to_delete = [] for i in range(len(current_loaded_models)): - print(sys.getrefcount(current_loaded_models[i].model)) if sys.getrefcount(current_loaded_models[i].model) <= 2: to_delete = [i] + to_delete diff --git a/comfy/utils.py b/comfy/utils.py index 7843b58ccad..df016ef9e2e 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -408,6 +408,10 @@ def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_am output[b:b+1] = out/out_div return output +PROGRESS_BAR_ENABLED = True +def set_progress_bar_enabled(enabled): + global PROGRESS_BAR_ENABLED + PROGRESS_BAR_ENABLED = enabled PROGRESS_BAR_HOOK = None def set_progress_bar_global_hook(function): diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 9391c714747..b52ad8fbd70 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -3,6 +3,7 @@ from comfy.k_diffusion import sampling as k_diffusion_sampling import latent_preview import torch +import comfy.utils class BasicScheduler: @@ -219,7 +220,7 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, x0_output = {} callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output) - disable_pbar = False + disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise_seed) out = latent.copy() diff --git a/execution.py b/execution.py index 5f5d6c73834..918c2bc5cc3 100644 --- 
a/execution.py +++ b/execution.py @@ -2,6 +2,7 @@ import sys import copy import json +import logging import threading import heapq import traceback @@ -156,7 +157,7 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute if server.client_id is not None: server.send_sync("executed", { "node": unique_id, "output": output_ui, "prompt_id": prompt_id }, server.client_id) except comfy.model_management.InterruptProcessingException as iex: - print("Processing interrupted") + logging.info("Processing interrupted") # skip formatting inputs/outputs error_details = { @@ -177,8 +178,8 @@ def recursive_execute(server, prompt, outputs, current_item, extra_data, execute for node_id, node_outputs in outputs.items(): output_data_formatted[node_id] = [[format_value(x) for x in l] for l in node_outputs] - print("!!! Exception during processing !!!") - print(traceback.format_exc()) + logging.error("!!! Exception during processing !!!") + logging.error(traceback.format_exc()) error_details = { "node_id": unique_id, @@ -636,11 +637,11 @@ def validate_prompt(prompt): if valid is True: good_outputs.add(o) else: - print(f"Failed to validate prompt for output {o}:") + logging.error(f"Failed to validate prompt for output {o}:") if len(reasons) > 0: - print("* (prompt):") + logging.error("* (prompt):") for reason in reasons: - print(f" - {reason['message']}: {reason['details']}") + logging.error(f" - {reason['message']}: {reason['details']}") errors += [(o, reasons)] for node_id, result in validated.items(): valid = result[0] @@ -656,11 +657,11 @@ def validate_prompt(prompt): "dependent_outputs": [], "class_type": class_type } - print(f"* {class_type} {node_id}:") + logging.error(f"* {class_type} {node_id}:") for reason in reasons: - print(f" - {reason['message']}: {reason['details']}") + logging.error(f" - {reason['message']}: {reason['details']}") node_errors[node_id]["dependent_outputs"].append(o) - print("Output will be ignored") + logging.error("Output will be ignored") if len(good_outputs) == 0: errors_list = [] diff --git a/folder_paths.py b/folder_paths.py index 898513b0e1f..5d121b443bd 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -29,6 +29,8 @@ folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions) +folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""}) + output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") diff --git a/nodes.py b/nodes.py index 16bf07ccaa2..208cbc84f59 100644 --- a/nodes.py +++ b/nodes.py @@ -1202,7 +1202,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, noise_mask = latent["noise_mask"] callback = latent_preview.prepare_callback(model, steps) - disable_pbar = False + disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) From 88733c997fd807a572d4a214d2c15fc5dd17b3c6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 11 Oct 2023 21:29:03 -0400 Subject: [PATCH 078/420] pytorch_attention_enabled can now return 
True when xformers is enabled. --- comfy/ldm/modules/diffusionmodules/model.py | 2 +- comfy/model_management.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index e6cf954ff5c..6576df4b68e 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -355,7 +355,7 @@ def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown' if model_management.xformers_enabled_vae() and attn_type == "vanilla": attn_type = "vanilla-xformers" - if model_management.pytorch_attention_enabled() and attn_type == "vanilla": + elif model_management.pytorch_attention_enabled() and attn_type == "vanilla": attn_type = "vanilla-pytorch" print(f"making attention of type '{attn_type}' with {in_channels} in_channels") if attn_type == "vanilla": diff --git a/comfy/model_management.py b/comfy/model_management.py index 3b43b21acd3..3c390d9ca0a 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -154,14 +154,18 @@ def is_nvidia(): return True return False -ENABLE_PYTORCH_ATTENTION = args.use_pytorch_cross_attention +ENABLE_PYTORCH_ATTENTION = False +if args.use_pytorch_cross_attention: + ENABLE_PYTORCH_ATTENTION = True + XFORMERS_IS_AVAILABLE = False + VAE_DTYPE = torch.float32 try: if is_nvidia(): torch_version = torch.version.__version__ if int(torch_version[0]) >= 2: - if ENABLE_PYTORCH_ATTENTION == False and XFORMERS_IS_AVAILABLE == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False: + if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False: ENABLE_PYTORCH_ATTENTION = True if torch.cuda.is_bf16_supported(): VAE_DTYPE = torch.bfloat16 @@ -186,7 +190,6 @@ def is_nvidia(): torch.backends.cuda.enable_math_sdp(True) torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp(True) - XFORMERS_IS_AVAILABLE = False if args.lowvram: set_vram_to = VRAMState.LOW_VRAM From 41d2c5660dab2404c7db78ff14c7faecd6c57e1f Mon Sep 17 00:00:00 2001 From: Chris Date: Thu, 12 Oct 2023 14:26:53 +1100 Subject: [PATCH 079/420] add query --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 7698d0f1173..3cf3585d28c 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1592,7 +1592,7 @@ export class ComfyApp { all_inputs = all_inputs.concat(Object.keys(parent.inputs)) for (let parent_input in all_inputs) { parent_input = all_inputs[parent_input]; - if (parent.inputs[parent_input].type === node.inputs[i].type) { + if (parent.inputs[parent_input]?.type === node.inputs[i].type) { link = parent.getInputLink(parent_input); if (link) { parent = parent.getInputNode(parent_input); From 851a4bdb803871d62ceca4249036da34449891ed Mon Sep 17 00:00:00 2001 From: Nick Teeple <64399276+nickteeple@users.noreply.github.com> Date: Thu, 12 Oct 2023 21:26:27 +0800 Subject: [PATCH 080/420] Update extra_model_paths.yaml.example with comfy specific example --- extra_model_paths.yaml.example | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/extra_model_paths.yaml.example b/extra_model_paths.yaml.example index 36078fffc7b..0870811f3f0 100644 --- a/extra_model_paths.yaml.example +++ 
b/extra_model_paths.yaml.example @@ -1,5 +1,20 @@ #Rename this to extra_model_paths.yaml and ComfyUI will load it +#config for comfyui +#your base path should be either an existing comfy install or a central folder where you store all of your models, loras, etc. +#in this example, we make a central folder called c:/machine-learning/ and replicate the ComfyUI models folder structure inside +comfyui: + base_path: c:/machine-learning/ + checkpoints: models/checkpoints/ + clip: models/clip/ + clip_vision: models/clip_vision/ + configs: models/configs/ + controlnet: models/controlnet/ + embeddings: models/embeddings/ + loras: models/loras/ + upscale_models: models/upscale_models/ + vae: models/vae/ + #config for a1111 ui #all you have to do is change the base_path to where yours is installed a111: From fee3b0c0700aedaafacd72ef90d49c7be8c1a003 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 12 Oct 2023 20:54:43 -0400 Subject: [PATCH 081/420] Move and comment out. --- extra_model_paths.yaml.example | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/extra_model_paths.yaml.example b/extra_model_paths.yaml.example index 0870811f3f0..846d04dbeb4 100644 --- a/extra_model_paths.yaml.example +++ b/extra_model_paths.yaml.example @@ -1,19 +1,5 @@ #Rename this to extra_model_paths.yaml and ComfyUI will load it -#config for comfyui -#your base path should be either an existing comfy install or a central folder where you store all of your models, loras, etc. -#in this example, we make a central folder called c:/machine-learning/ and replicate the ComfyUI models folder structure inside -comfyui: - base_path: c:/machine-learning/ - checkpoints: models/checkpoints/ - clip: models/clip/ - clip_vision: models/clip_vision/ - configs: models/configs/ - controlnet: models/controlnet/ - embeddings: models/embeddings/ - loras: models/loras/ - upscale_models: models/upscale_models/ - vae: models/vae/ #config for a1111 ui #all you have to do is change the base_path to where yours is installed @@ -34,6 +20,21 @@ a111: hypernetworks: models/hypernetworks controlnet: models/ControlNet +#config for comfyui +#your base path should be either an existing comfy install or a central folder where you store all of your models, loras, etc. 
+ +#comfyui: +# base_path: path/to/comfyui/ +# checkpoints: models/checkpoints/ +# clip: models/clip/ +# clip_vision: models/clip_vision/ +# configs: models/configs/ +# controlnet: models/controlnet/ +# embeddings: models/embeddings/ +# loras: models/loras/ +# upscale_models: models/upscale_models/ +# vae: models/vae/ + #other_ui: # base_path: path/to/ui # checkpoints: models/checkpoints From 87097a11c32ac7393093055c4ed1432553995560 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Fri, 13 Oct 2023 12:26:54 -0300 Subject: [PATCH 082/420] Fix FeatherMask --- comfy_extras/nodes_mask.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 9b0b289c189..3cc4cfd7612 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -282,10 +282,10 @@ def INPUT_TYPES(cls): def feather(self, mask, left, top, right, bottom): output = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).clone() - left = min(left, output.shape[1]) - right = min(right, output.shape[1]) - top = min(top, output.shape[0]) - bottom = min(bottom, output.shape[0]) + left = min(left, output.shape[-1]) + right = min(right, output.shape[-1]) + top = min(top, output.shape[-2]) + bottom = min(bottom, output.shape[-2]) for x in range(left): feather_rate = (x + 1.0) / left From b5fa3d28d7a9242a73c40b3b37e526267cbd67fd Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Fri, 13 Oct 2023 13:40:53 -0300 Subject: [PATCH 083/420] Fix MaskComposite --- comfy_extras/nodes_mask.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index 3cc4cfd7612..d8c65c2b6b9 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -240,8 +240,8 @@ def combine(self, destination, source, x, y, operation): right, bottom = (min(left + source.shape[-1], destination.shape[-1]), min(top + source.shape[-2], destination.shape[-2])) visible_width, visible_height = (right - left, bottom - top,) - source_portion = source[:visible_height, :visible_width] - destination_portion = destination[top:bottom, left:right] + source_portion = source[:, :visible_height, :visible_width] + destination_portion = destination[:, top:bottom, left:right] if operation == "multiply": output[:, top:bottom, left:right] = destination_portion * source_portion From 9a55dadb4c9c82d772d934125112b8b0613e4f28 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 13 Oct 2023 14:35:21 -0400 Subject: [PATCH 084/420] Refactor code so model can be a dtype other than fp32 or fp16. 
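Instead of threading use_fp16/use_bf16 booleans into every model constructor, callers now resolve a torch.dtype once and pass it down. A rough sketch of that selection logic (force_bf16 and the device check are hypothetical stand-ins for the real command line flag and the should_use_fp16() heuristic, not the actual implementation):

    import torch

    def pick_unet_dtype(device=None, model_params=0, force_bf16=False):
        # Resolve the dtype in one place; model code just receives the result.
        if force_bf16:
            return torch.bfloat16
        if device is not None and device.type == "cuda":
            return torch.float16
        return torch.float32

    dtype = pick_unet_dtype(torch.device("cuda" if torch.cuda.is_available() else "cpu"))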
--- comfy/cldm/cldm.py | 6 ++-- comfy/controlnet.py | 11 +++---- .../modules/diffusionmodules/openaimodel.py | 6 ++-- comfy/model_detection.py | 32 +++++++++---------- comfy/model_management.py | 5 +++ comfy/sd.py | 20 ++++++------ 6 files changed, 39 insertions(+), 41 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 25148313117..f982d648ce4 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -34,8 +34,7 @@ def __init__( dims=2, num_classes=None, use_checkpoint=False, - use_fp16=False, - use_bf16=False, + dtype=torch.float32, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, @@ -108,8 +107,7 @@ def __init__( self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.dtype = th.bfloat16 if use_bf16 else self.dtype + self.dtype = dtype self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample diff --git a/comfy/controlnet.py b/comfy/controlnet.py index ea219c7e560..73a40acfa24 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -292,8 +292,8 @@ def load_controlnet(ckpt_path, model=None): controlnet_config = None if "controlnet_cond_embedding.conv_in.weight" in controlnet_data: #diffusers format - use_fp16 = comfy.model_management.should_use_fp16() - controlnet_config = comfy.model_detection.unet_config_from_diffusers_unet(controlnet_data, use_fp16) + unet_dtype = comfy.model_management.unet_dtype() + controlnet_config = comfy.model_detection.unet_config_from_diffusers_unet(controlnet_data, unet_dtype) diffusers_keys = comfy.utils.unet_to_diffusers(controlnet_config) diffusers_keys["controlnet_mid_block.weight"] = "middle_block_out.0.weight" diffusers_keys["controlnet_mid_block.bias"] = "middle_block_out.0.bias" @@ -353,8 +353,8 @@ def load_controlnet(ckpt_path, model=None): return net if controlnet_config is None: - use_fp16 = comfy.model_management.should_use_fp16() - controlnet_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, use_fp16, True).unet_config + unet_dtype = comfy.model_management.unet_dtype() + controlnet_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, unet_dtype, True).unet_config controlnet_config.pop("out_channels") controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1] control_model = comfy.cldm.cldm.ControlNet(**controlnet_config) @@ -383,8 +383,7 @@ class WeightsLoader(torch.nn.Module): missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False) print(missing, unexpected) - if use_fp16: - control_model = control_model.half() + control_model = control_model.to(unet_dtype) global_average_pooling = False filename = os.path.splitext(ckpt_path)[0] diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index b42637c821a..bf58a4045f7 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -296,8 +296,7 @@ def __init__( dims=2, num_classes=None, use_checkpoint=False, - use_fp16=False, - use_bf16=False, + dtype=th.float32, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, @@ -370,8 +369,7 @@ def __init__( self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.dtype = th.bfloat16 if use_bf16 else 
self.dtype + self.dtype = dtype self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 787c78575ae..0ff2e7fb53f 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -14,7 +14,7 @@ def count_blocks(state_dict_keys, prefix_string): count += 1 return count -def detect_unet_config(state_dict, key_prefix, use_fp16): +def detect_unet_config(state_dict, key_prefix, dtype): state_dict_keys = list(state_dict.keys()) unet_config = { @@ -32,7 +32,7 @@ def detect_unet_config(state_dict, key_prefix, use_fp16): else: unet_config["adm_in_channels"] = None - unet_config["use_fp16"] = use_fp16 + unet_config["dtype"] = dtype model_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[0] in_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[1] @@ -116,15 +116,15 @@ def model_config_from_unet_config(unet_config): print("no match", unet_config) return None -def model_config_from_unet(state_dict, unet_key_prefix, use_fp16, use_base_if_no_match=False): - unet_config = detect_unet_config(state_dict, unet_key_prefix, use_fp16) +def model_config_from_unet(state_dict, unet_key_prefix, dtype, use_base_if_no_match=False): + unet_config = detect_unet_config(state_dict, unet_key_prefix, dtype) model_config = model_config_from_unet_config(unet_config) if model_config is None and use_base_if_no_match: return comfy.supported_models_base.BASE(unet_config) else: return model_config -def unet_config_from_diffusers_unet(state_dict, use_fp16): +def unet_config_from_diffusers_unet(state_dict, dtype): match = {} attention_resolutions = [] @@ -147,47 +147,47 @@ def unet_config_from_diffusers_unet(state_dict, use_fp16): match["adm_in_channels"] = state_dict["add_embedding.linear_1.weight"].shape[1] SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2560, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 384, + 'num_classes': 'sequential', 'adm_in_channels': 2560, 'dtype': dtype, 'in_channels': 4, 'model_channels': 384, 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 4, 4, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 4, 'use_linear_in_transformer': True, 'context_dim': 1280, "num_head_channels": 64} SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'adm_in_channels': None, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, + 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 
'context_dim': 1024, "num_head_channels": 64} SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2048, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, + 'num_classes': 'sequential', 'adm_in_channels': 2048, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024, "num_head_channels": 64} SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 1536, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, + 'num_classes': 'sequential', 'adm_in_channels': 1536, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024} SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'adm_in_channels': None, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, + 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, "num_heads": 8} SDXL_mid_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [4], 'transformer_depth': [0, 0, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} SDXL_small_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 4, 'model_channels': 320, + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [], 'transformer_depth': [0, 0, 0], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 0, 'use_linear_in_transformer': True, "num_head_channels": 64, 'context_dim': 1} SDXL_diffusers_inpaint = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'use_fp16': use_fp16, 'in_channels': 9, 'model_channels': 320, + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320, 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} @@ -203,8 +203,8 @@ def 
unet_config_from_diffusers_unet(state_dict, use_fp16): return unet_config return None -def model_config_from_diffusers_unet(state_dict, use_fp16): - unet_config = unet_config_from_diffusers_unet(state_dict, use_fp16) +def model_config_from_diffusers_unet(state_dict, dtype): + unet_config = unet_config_from_diffusers_unet(state_dict, dtype) if unet_config is not None: return model_config_from_unet_config(unet_config) return None diff --git a/comfy/model_management.py b/comfy/model_management.py index 3c390d9ca0a..1161c24478c 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -448,6 +448,11 @@ def unet_inital_load_device(parameters, dtype): else: return cpu_dev +def unet_dtype(device=None, model_params=0): + if should_use_fp16(device=device, model_params=model_params): + return torch.float16 + return torch.float32 + def text_encoder_offload_device(): if args.gpu_only: return get_torch_device() diff --git a/comfy/sd.py b/comfy/sd.py index cfd6fb3cb56..fd8b94df895 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -327,7 +327,9 @@ def load_checkpoint(config_path=None, ckpt_path=None, output_vae=True, output_cl if "params" in model_config_params["unet_config"]: unet_config = model_config_params["unet_config"]["params"] if "use_fp16" in unet_config: - fp16 = unet_config["use_fp16"] + fp16 = unet_config.pop("use_fp16") + if fp16: + unet_config["dtype"] = torch.float16 noise_aug_config = None if "noise_aug_config" in model_config_params: @@ -405,12 +407,12 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o clip_target = None parameters = comfy.utils.calculate_parameters(sd, "model.diffusion_model.") - fp16 = model_management.should_use_fp16(model_params=parameters) + unet_dtype = model_management.unet_dtype(model_params=parameters) class WeightsLoader(torch.nn.Module): pass - model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", fp16) + model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", unet_dtype) if model_config is None: raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path)) @@ -418,12 +420,8 @@ class WeightsLoader(torch.nn.Module): if output_clipvision: clipvision = clip_vision.load_clipvision_from_sd(sd, model_config.clip_vision_prefix, True) - dtype = torch.float32 - if fp16: - dtype = torch.float16 - if output_model: - inital_load_device = model_management.unet_inital_load_device(parameters, dtype) + inital_load_device = model_management.unet_inital_load_device(parameters, unet_dtype) offload_device = model_management.unet_offload_device() model = model_config.get_model(sd, "model.diffusion_model.", device=inital_load_device) model.load_model_weights(sd, "model.diffusion_model.") @@ -458,15 +456,15 @@ class WeightsLoader(torch.nn.Module): def load_unet(unet_path): #load unet in diffusers format sd = comfy.utils.load_torch_file(unet_path) parameters = comfy.utils.calculate_parameters(sd) - fp16 = model_management.should_use_fp16(model_params=parameters) + unet_dtype = model_management.unet_dtype(model_params=parameters) if "input_blocks.0.0.weight" in sd: #ldm - model_config = model_detection.model_config_from_unet(sd, "", fp16) + model_config = model_detection.model_config_from_unet(sd, "", unet_dtype) if model_config is None: raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) new_sd = sd else: #diffusers - model_config = model_detection.model_config_from_diffusers_unet(sd, fp16) + model_config = 
model_detection.model_config_from_diffusers_unet(sd, unet_dtype) if model_config is None: print("ERROR UNSUPPORTED UNET", unet_path) return None From fd4c5f07e7b4d034c7f9fa96d6853f4aeec7b2de Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 13 Oct 2023 14:51:10 -0400 Subject: [PATCH 085/420] Add a --bf16-unet to test running the unet in bf16. --- comfy/cli_args.py | 2 ++ comfy/model_management.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 35d44164f1c..d86557646f1 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -53,6 +53,8 @@ def __call__(self, parser, namespace, values, option_string=None): fp_group.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).") fp_group.add_argument("--force-fp16", action="store_true", help="Force fp16.") +parser.add_argument("--bf16-unet", action="store_true", help="Run the UNET in bf16. This should only be used for testing stuff.") + fpvae_group = parser.add_mutually_exclusive_group() fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.") fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.") diff --git a/comfy/model_management.py b/comfy/model_management.py index 1161c24478c..c24c7b27e3d 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -449,6 +449,8 @@ def unet_inital_load_device(parameters, dtype): return cpu_dev def unet_dtype(device=None, model_params=0): + if args.bf16_unet: + return torch.bfloat16 if should_use_fp16(device=device, model_params=model_params): return torch.float16 return torch.float32 From 25f0f4e9c8fdbad43311b8e5161b3e6efa9d58d9 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Sat, 14 Oct 2023 11:54:33 -0300 Subject: [PATCH 086/420] Shortcut Alt + C to collapse/uncollapse selected nodes --- README.md | 1 + web/scripts/app.js | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/README.md b/README.md index 6bef25cee0b..d622c907209 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git | Ctrl + S | Save workflow | | Ctrl + O | Load workflow | | Ctrl + A | Select all nodes | +| Alt + C | Collapse/uncollapse selected nodes | | Ctrl + M | Mute/unmute selected nodes | | Ctrl + B | Bypass selected nodes (acts like the node was removed from the graph and the wires reconnected through) | | Delete/Backspace | Delete selected nodes | diff --git a/web/scripts/app.js b/web/scripts/app.js index 3cf3585d28c..1a07d69bcf7 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -928,6 +928,16 @@ export class ComfyApp { block_default = true; } + // Alt + C collapse/uncollapse + if (e.key === 'c' && e.altKey) { + if (this.selected_nodes) { + for (var i in this.selected_nodes) { + this.selected_nodes[i].collapse() + } + } + block_default = true; + } + // Ctrl+C Copy if ((e.key === 'c') && (e.metaKey || e.ctrlKey)) { // Trigger onCopy From 2e6270e328d4b43f9d76172f21f6d7636bf4a452 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Sat, 14 Oct 2023 11:56:44 -0300 Subject: [PATCH 087/420] Stop auto queue on error --- web/scripts/ui.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 1e7920167a6..c3b3fbda114 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -809,7 +809,8 @@ export class ComfyUI { if ( this.lastQueueSize != 0 && 
status.exec_info.queue_remaining == 0 && - document.getElementById("autoQueueCheckbox").checked + document.getElementById("autoQueueCheckbox").checked && + ! app.lastExecutionError ) { app.queuePrompt(0, this.batchCount); } From 8d04978298d8bb2bcb969608a994fe4ef39be3ee Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Sat, 14 Oct 2023 11:59:35 -0300 Subject: [PATCH 088/420] Allow all extensions if extension list is empty --- folder_paths.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/folder_paths.py b/folder_paths.py index 5d121b443bd..4a38deec06f 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -146,7 +146,7 @@ def recursive_search(directory, excluded_dir_names=None): return result, dirs def filter_files_extensions(files, extensions): - return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions, files))) + return sorted(list(filter(lambda a: os.path.splitext(a)[-1].lower() in extensions or len(extensions) == 0, files))) From a7b65b9505b0504fefc7b57a5e80a243bd6630cf Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Sat, 14 Oct 2023 12:11:49 -0300 Subject: [PATCH 089/420] Group menu option select nodes --- web/extensions/core/groupOptions.js | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/web/extensions/core/groupOptions.js b/web/extensions/core/groupOptions.js index 1d935e90aef..d523737dc7e 100644 --- a/web/extensions/core/groupOptions.js +++ b/web/extensions/core/groupOptions.js @@ -38,6 +38,15 @@ app.registerExtension({ } } + options.push({ + content: "Select Nodes", + callback: () => { + this.selectNodes(nodesInGroup); + this.graph.change(); + this.canvas.focus(); + } + }); + // Modes // 0: Always // 1: On Event From 7e09e889e3d52eaa0e51144e6257e3baa2f9349e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 15 Oct 2023 02:22:22 -0400 Subject: [PATCH 090/420] Make clear that the old CheckpointLoader is deprecated. --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 208cbc84f59..d0e9ffcc5dc 100644 --- a/nodes.py +++ b/nodes.py @@ -1660,7 +1660,7 @@ def expand_image(self, image, left, top, right, bottom, feathering): "KSampler": "KSampler", "KSamplerAdvanced": "KSampler (Advanced)", # Loaders - "CheckpointLoader": "Load Checkpoint (With Config)", + "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)", "CheckpointLoaderSimple": "Load Checkpoint", "VAELoader": "Load VAE", "LoraLoader": "Load LoRA", From bb064c97965da8af32e08d7ac00768f62c33cdab Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 16 Oct 2023 02:31:24 -0400 Subject: [PATCH 091/420] Add a separate optimized_attention_masked function. 
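This lets masked attention stop raising NotImplementedError on the pytorch
path: the mask is handed straight to scaled_dot_product_attention, and
CrossAttention only routes through the masked implementation when a mask is
actually present. A minimal sketch of the masked kernel (illustrative only,
assuming q/k/v shaped (batch, tokens, heads * dim_head)):

    import torch

    def sdpa_masked(q, k, v, heads, mask=None):
        # split heads: (b, tokens, heads*dh) -> (b, heads, tokens, dh)
        b, t, _ = q.shape
        q, k, v = (x.view(b, -1, heads, x.shape[-1] // heads).transpose(1, 2)
                   for x in (q, k, v))
        out = torch.nn.functional.scaled_dot_product_attention(
            q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
        # merge heads back to (b, tokens, heads*dh)
        return out.transpose(1, 2).reshape(b, t, -1)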
--- comfy/ldm/modules/attention.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index ac0d9c8c5df..9cd14a537a4 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -285,15 +285,14 @@ def attention_pytorch(q, k, v, heads, mask=None): ) out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) - - if exists(mask): - raise NotImplementedError out = ( out.transpose(1, 2).reshape(b, -1, heads * dim_head) ) return out + optimized_attention = attention_basic +optimized_attention_masked = attention_basic if model_management.xformers_enabled(): print("Using xformers cross attention") @@ -309,6 +308,9 @@ def attention_pytorch(q, k, v, heads, mask=None): print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") optimized_attention = attention_sub_quad +if model_management.pytorch_attention_enabled(): + optimized_attention_masked = attention_pytorch + class CrossAttention(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() @@ -334,7 +336,10 @@ def forward(self, x, context=None, value=None, mask=None): else: v = self.to_v(context) - out = optimized_attention(q, k, v, self.heads, mask) + if mask is None: + out = optimized_attention(q, k, v, self.heads) + else: + out = optimized_attention_masked(q, k, v, self.heads, mask) return self.to_out(out) From 7d5d0fd577fd6c4ad5de8d07a71aa7599c457b70 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Mon, 16 Oct 2023 15:12:40 -0300 Subject: [PATCH 092/420] Group options - Add Group For Selected Nodes - Add Selected Nodes To Group - Fit Group To Nodes --- web/extensions/core/groupOptions.js | 71 +++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/web/extensions/core/groupOptions.js b/web/extensions/core/groupOptions.js index d523737dc7e..3ec0e3fd994 100644 --- a/web/extensions/core/groupOptions.js +++ b/web/extensions/core/groupOptions.js @@ -5,6 +5,49 @@ function setNodeMode(node, mode) { node.graph.change(); } +function addNodesToGroup(group, nodes=[]) { + var x1, y1, x2, y2; + var nx1, ny1, nx2, ny2; + var node; + + x1 = y1 = x2 = y2 = -1; + nx1 = ny1 = nx2 = ny2 = -1; + + for (var n of [group._nodes, nodes]) { + for (var i in n) { + node = n[i] + + nx1 = node.pos[0] + ny1 = node.pos[1] + nx2 = node.pos[0] + node.size[0] + ny2 = node.pos[1] + node.size[1] + + if (x1 == -1 || nx1 < x1) { + x1 = nx1; + } + + if (y1 == -1 || ny1 < y1) { + y1 = ny1; + } + + if (x2 == -1 || nx2 > x2) { + x2 = nx2; + } + + if (y2 == -1 || ny2 > y2) { + y2 = ny2; + } + } + } + + var padding = 10; + + y1 = y1 - Math.round(group.font_size * 2.7); + + group.pos = [x1 - padding, y1 - padding]; + group.size = [x2 - x1 + padding * 2, y2 - y1 + padding * 2]; +} + app.registerExtension({ name: "Comfy.GroupOptions", setup() { @@ -14,6 +57,17 @@ app.registerExtension({ const options = orig.apply(this, arguments); const group = this.graph.getGroupOnPos(this.graph_mouse[0], this.graph_mouse[1]); if (!group) { + options.push({ + content: "Add Group For Selected Nodes", + disabled: !Object.keys(app.canvas.selected_nodes || {}).length, + callback: () => { + var group = new LiteGraph.LGraphGroup(); + addNodesToGroup(group, this.selected_nodes) + app.canvas.graph.add(group); + this.graph.change(); + } + }); + 
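			// addNodesToGroup (above) grows the group to the union of the selected
			// nodes' bounding boxes, padded by 10px and shifted up by
			// group.font_size * 2.7 so the group title clears the top row of nodes
			// (a later patch in this series tightens that factor to 1.4).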
return options; } @@ -38,6 +92,23 @@ app.registerExtension({ } } + options.push({ + content: "Add Selected Nodes To Group", + disabled: !Object.keys(app.canvas.selected_nodes || {}).length, + callback: () => { + addNodesToGroup(group, this.selected_nodes) + this.graph.change(); + } + }); + + options.push({ + content: "Fit Group To Nodes", + callback: () => { + addNodesToGroup(group) + this.graph.change(); + } + }); + options.push({ content: "Select Nodes", callback: () => { From e8c02219ee41424d076682841dd84a1ee9093190 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Mon, 16 Oct 2023 15:26:36 -0300 Subject: [PATCH 093/420] Fix add selected nodes to empty group --- web/extensions/core/groupOptions.js | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/web/extensions/core/groupOptions.js b/web/extensions/core/groupOptions.js index 3ec0e3fd994..d3d52bf2912 100644 --- a/web/extensions/core/groupOptions.js +++ b/web/extensions/core/groupOptions.js @@ -75,6 +75,15 @@ app.registerExtension({ group.recomputeInsideNodes(); const nodesInGroup = group._nodes; + options.push({ + content: "Add Selected Nodes To Group", + disabled: !Object.keys(app.canvas.selected_nodes || {}).length, + callback: () => { + addNodesToGroup(group, this.selected_nodes) + this.graph.change(); + } + }); + // No nodes in group, return default options if (nodesInGroup.length === 0) { return options; @@ -92,15 +101,6 @@ app.registerExtension({ } } - options.push({ - content: "Add Selected Nodes To Group", - disabled: !Object.keys(app.canvas.selected_nodes || {}).length, - callback: () => { - addNodesToGroup(group, this.selected_nodes) - this.graph.change(); - } - }); - options.push({ content: "Fit Group To Nodes", callback: () => { From 682c84ccf3fb601d0bd3ae8a506cc7e3d42d2199 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Mon, 16 Oct 2023 16:00:01 -0300 Subject: [PATCH 094/420] Fix fit group to nodes with reroute and collapsed nodes --- web/extensions/core/groupOptions.js | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/web/extensions/core/groupOptions.js b/web/extensions/core/groupOptions.js index d3d52bf2912..5dd21e73016 100644 --- a/web/extensions/core/groupOptions.js +++ b/web/extensions/core/groupOptions.js @@ -22,6 +22,18 @@ function addNodesToGroup(group, nodes=[]) { nx2 = node.pos[0] + node.size[0] ny2 = node.pos[1] + node.size[1] + if (node.type != "Reroute") { + ny1 -= LiteGraph.NODE_TITLE_HEIGHT; + } + + if (node.flags?.collapsed) { + ny2 = ny1 + LiteGraph.NODE_TITLE_HEIGHT; + + if (node?._collapsed_width) { + nx2 = nx1 + Math.round(node._collapsed_width); + } + } + if (x1 == -1 || nx1 < x1) { x1 = nx1; } @@ -42,7 +54,7 @@ function addNodesToGroup(group, nodes=[]) { var padding = 10; - y1 = y1 - Math.round(group.font_size * 2.7); + y1 = y1 - Math.round(group.font_size * 1.4); group.pos = [x1 - padding, y1 - padding]; group.size = [x2 - x1 + padding * 2, y2 - y1 + padding * 2]; From 5a608aa37c3113db286cc7d3c06147c175249325 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Mon, 16 Oct 2023 17:29:23 -0300 Subject: [PATCH 095/420] Fix node getBounding for collapsed nodes --- web/lib/litegraph.core.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index f81c83a8a4c..e906590f5ef 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -3796,7 +3796,7 @@ out = out || new Float32Array(4); out[0] = this.pos[0] - 4; out[1] = this.pos[1] - LiteGraph.NODE_TITLE_HEIGHT; - 
out[2] = this.size[0] + 4; + out[2] = this.flags.collapsed ? (this._collapsed_width || LiteGraph.NODE_COLLAPSED_WIDTH) : this.size[0] + 4; out[3] = this.flags.collapsed ? LiteGraph.NODE_TITLE_HEIGHT : this.size[1] + LiteGraph.NODE_TITLE_HEIGHT; if (this.onBounding) { From c8013f73e57b27f8e89d96a13d806d4340bd48a0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 16 Oct 2023 16:46:41 -0400 Subject: [PATCH 096/420] Add some Quadro cards to the list of cards with broken fp16. --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index c24c7b27e3d..64ed19727f4 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -667,7 +667,7 @@ def should_use_fp16(device=None, model_params=0, prioritize_performance=True): return False #FP16 is just broken on these cards - nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX"] + nvidia_16_series = ["1660", "1650", "1630", "T500", "T550", "T600", "MX550", "MX450", "CMP 30HX", "T2000", "T1000", "T1200"] for x in nvidia_16_series: if x in props.name: return False From 23680a9155727b67eb542445417ec362a53f2148 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 17 Oct 2023 03:19:29 -0400 Subject: [PATCH 097/420] Refactor the attention stuff in the VAE. --- comfy/ldm/modules/diffusionmodules/model.py | 194 ++++++-------------- 1 file changed, 58 insertions(+), 136 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 6576df4b68e..852d367c9af 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -193,6 +193,52 @@ def slice_attention(q, k, v): return r1 +def normal_attention(q, k, v): + # compute attention + b,c,h,w = q.shape + + q = q.reshape(b,c,h*w) + q = q.permute(0,2,1) # b,hw,c + k = k.reshape(b,c,h*w) # b,c,hw + v = v.reshape(b,c,h*w) + + r1 = slice_attention(q, k, v) + h_ = r1.reshape(b,c,h,w) + del r1 + return h_ + +def xformers_attention(q, k, v): + # compute attention + B, C, H, W = q.shape + q, k, v = map( + lambda t: t.view(B, C, -1).transpose(1, 2).contiguous(), + (q, k, v), + ) + + try: + out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) + out = out.transpose(1, 2).reshape(B, C, H, W) + except NotImplementedError as e: + out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W) + return out + +def pytorch_attention(q, k, v): + # compute attention + B, C, H, W = q.shape + q, k, v = map( + lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(), + (q, k, v), + ) + + try: + out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) + out = out.transpose(2, 3).reshape(B, C, H, W) + except model_management.OOM_EXCEPTION as e: + print("scaled_dot_product_attention OOMed: switched to slice attention") + out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W) + return out + + class AttnBlock(nn.Module): def __init__(self, in_channels): super().__init__() @@ -220,6 +266,16 @@ def __init__(self, in_channels): stride=1, padding=0) + if model_management.xformers_enabled_vae(): + print("Using xformers attention in VAE") + self.optimized_attention = xformers_attention + elif model_management.pytorch_attention_enabled(): + print("Using pytorch attention in VAE") + 
self.optimized_attention = pytorch_attention + else: + print("Using split attention in VAE") + self.optimized_attention = normal_attention + def forward(self, x): h_ = x h_ = self.norm(h_) @@ -227,149 +283,15 @@ def forward(self, x): k = self.k(h_) v = self.v(h_) - # compute attention - b,c,h,w = q.shape - - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - v = v.reshape(b,c,h*w) + h_ = self.optimized_attention(q, k, v) - r1 = slice_attention(q, k, v) - h_ = r1.reshape(b,c,h,w) - del r1 h_ = self.proj_out(h_) return x+h_ -class MemoryEfficientAttnBlock(nn.Module): - """ - Uses xformers efficient implementation, - see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223 - Note: this is a single-head self-attention operation - """ - # - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.attention_op: Optional[Any] = None - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - B, C, H, W = q.shape - q, k, v = map( - lambda t: t.view(B, C, -1).transpose(1, 2).contiguous(), - (q, k, v), - ) - - try: - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op) - out = out.transpose(1, 2).reshape(B, C, H, W) - except NotImplementedError as e: - out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W) - - out = self.proj_out(out) - return x+out - -class MemoryEfficientAttnBlockPytorch(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = comfy.ops.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.attention_op: Optional[Any] = None - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - B, C, H, W = q.shape - q, k, v = map( - lambda t: t.view(B, 1, C, -1).transpose(2, 3).contiguous(), - (q, k, v), - ) - - try: - out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) - out = out.transpose(2, 3).reshape(B, C, H, W) - except model_management.OOM_EXCEPTION as e: - print("scaled_dot_product_attention OOMed: switched to slice attention") - out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W) - - out = self.proj_out(out) - return x+out def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None): - assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], 
f'attn_type {attn_type} unknown' - if model_management.xformers_enabled_vae() and attn_type == "vanilla": - attn_type = "vanilla-xformers" - elif model_management.pytorch_attention_enabled() and attn_type == "vanilla": - attn_type = "vanilla-pytorch" - print(f"making attention of type '{attn_type}' with {in_channels} in_channels") - if attn_type == "vanilla": - assert attn_kwargs is None - return AttnBlock(in_channels) - elif attn_type == "vanilla-xformers": - print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...") - return MemoryEfficientAttnBlock(in_channels) - elif attn_type == "vanilla-pytorch": - return MemoryEfficientAttnBlockPytorch(in_channels) - elif attn_type == "none": - return nn.Identity(in_channels) - else: - raise NotImplementedError() + return AttnBlock(in_channels) class Model(nn.Module): From 92f0318630f4567227dd8b4b6e53328ec5526195 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 17 Oct 2023 11:39:15 -0400 Subject: [PATCH 098/420] Try to fix notebook. --- notebooks/comfyui_colab.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb index 4fdccaace44..ec83265b42c 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -47,7 +47,7 @@ " !git pull\n", "\n", "!echo -= Install dependencies =-\n", - "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117" + "!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121 --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117" ] }, { From f8caa24bcc6abf226aeb486beeffd853e711c770 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 17 Oct 2023 12:08:03 -0400 Subject: [PATCH 099/420] Support hypernetwork with mish activation function and layer norm. 
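Layer norm support means the loader can no longer assume every entry in the
module list is a Linear: when is_layer_norm is set, the entry after each
linear holds the LayerNorm weight/bias, so the loop switches from a plain
for loop to a manual index that skips the consumed entry. Roughly (a
simplified sketch of the loading pattern, not the exact loader, which also
handles dropout and the activate_output flag):

    import torch

    ACTIVATIONS = {"relu": torch.nn.ReLU, "mish": torch.nn.Mish}

    def build_layers(sd, names, activation, use_layer_norm):
        # sd: state dict with "<name>.weight" / "<name>.bias" tensors,
        # names: ordered module names, e.g. ["linear1", "norm1", "linear2"]
        layers, i = [], 0
        while i < len(names):
            w, b = sd[names[i] + ".weight"], sd[names[i] + ".bias"]
            lin = torch.nn.Linear(w.shape[1], w.shape[0])
            lin.load_state_dict({"weight": w, "bias": b})
            layers.append(lin)
            if i < len(names) - 1:  # hidden layers get activation (+ norm)
                layers.append(ACTIVATIONS[activation]())
                if use_layer_norm:
                    i += 1  # next entry holds the norm parameters
                    ln = torch.nn.LayerNorm(sd[names[i] + ".weight"].shape[0])
                    ln.load_state_dict({"weight": sd[names[i] + ".weight"],
                                        "bias": sd[names[i] + ".bias"]})
                    layers.append(ln)
            i += 1
        return torch.nn.Sequential(*layers)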
--- comfy_extras/nodes_hypernetwork.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index d16c49aeb24..f692945a86b 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -19,6 +19,7 @@ def load_hypernetwork_patch(path, strength): "tanh": torch.nn.Tanh, "sigmoid": torch.nn.Sigmoid, "softsign": torch.nn.Softsign, + "mish": torch.nn.Mish, } if activation_func not in valid_activation: @@ -42,7 +43,8 @@ def load_hypernetwork_patch(path, strength): linears = list(map(lambda a: a[:-len(".weight")], linears)) layers = [] - for i in range(len(linears)): + i = 0 + while i < len(linears): lin_name = linears[i] last_layer = (i == (len(linears) - 1)) penultimate_layer = (i == (len(linears) - 2)) @@ -56,10 +58,17 @@ def load_hypernetwork_patch(path, strength): if (not last_layer) or (activate_output): layers.append(valid_activation[activation_func]()) if is_layer_norm: - layers.append(torch.nn.LayerNorm(lin_weight.shape[0])) + i += 1 + ln_name = linears[i] + ln_weight = attn_weights['{}.weight'.format(ln_name)] + ln_bias = attn_weights['{}.bias'.format(ln_name)] + ln = torch.nn.LayerNorm(ln_weight.shape[0]) + ln.load_state_dict({"weight": ln_weight, "bias": ln_bias}) + layers.append(ln) if use_dropout: if (not last_layer) and (not penultimate_layer or last_layer_dropout): layers.append(torch.nn.Dropout(p=0.3)) + i += 1 output.append(torch.nn.Sequential(*layers)) out[dim] = torch.nn.ModuleList(output) From d44a2de49f797af6b1adc709e709d6b0b4d89093 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 17 Oct 2023 14:51:51 -0400 Subject: [PATCH 100/420] Make VAE code closer to sgm. --- comfy/diffusers_load.py | 3 +- comfy/ldm/models/autoencoder.py | 370 ++++++++++---------- comfy/ldm/modules/diffusionmodules/model.py | 31 +- comfy/sd.py | 39 +-- comfy/utils.py | 11 +- nodes.py | 3 +- 6 files changed, 235 insertions(+), 222 deletions(-) diff --git a/comfy/diffusers_load.py b/comfy/diffusers_load.py index a52e0102b73..c0b420e7966 100644 --- a/comfy/diffusers_load.py +++ b/comfy/diffusers_load.py @@ -31,6 +31,7 @@ def load_diffusers(model_path, output_vae=True, output_clip=True, embedding_dire vae = None if output_vae: - vae = comfy.sd.VAE(ckpt_path=vae_path) + sd = comfy.utils.load_torch_file(vae_path) + vae = comfy.sd.VAE(sd=sd) return (unet, clip, vae) diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py index 1fb7ed879fc..d2f1d74a938 100644 --- a/comfy/ldm/models/autoencoder.py +++ b/comfy/ldm/models/autoencoder.py @@ -2,67 +2,66 @@ # import pytorch_lightning as pl import torch.nn.functional as F from contextlib import contextmanager +from typing import Any, Dict, List, Optional, Tuple, Union -from comfy.ldm.modules.diffusionmodules.model import Encoder, Decoder from comfy.ldm.modules.distributions.distributions import DiagonalGaussianDistribution from comfy.ldm.util import instantiate_from_config from comfy.ldm.modules.ema import LitEma -# class AutoencoderKL(pl.LightningModule): -class AutoencoderKL(torch.nn.Module): - def __init__(self, - ddconfig, - lossconfig, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - ema_decay=None, - learn_logvar=False - ): +class DiagonalGaussianRegularizer(torch.nn.Module): + def __init__(self, sample: bool = True): super().__init__() - self.learn_logvar = learn_logvar - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - 
self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - assert ddconfig["double_z"] - self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - self.embed_dim = embed_dim - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + self.sample = sample + + def get_trainable_parameters(self) -> Any: + yield from () + + def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]: + log = dict() + posterior = DiagonalGaussianDistribution(z) + if self.sample: + z = posterior.sample() + else: + z = posterior.mode() + kl_loss = posterior.kl() + kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] + log["kl_loss"] = kl_loss + return z, log + + +class AbstractAutoencoder(torch.nn.Module): + """ + This is the base class for all autoencoders, including image autoencoders, image autoencoders with discriminators, + unCLIP models, etc. Hence, it is fairly general, and specific features + (e.g. discriminator training, encoding, decoding) must be implemented in subclasses. + """ + + def __init__( + self, + ema_decay: Union[None, float] = None, + monitor: Union[None, str] = None, + input_key: str = "jpg", + **kwargs, + ): + super().__init__() + + self.input_key = input_key + self.use_ema = ema_decay is not None if monitor is not None: self.monitor = monitor - self.use_ema = ema_decay is not None if self.use_ema: - self.ema_decay = ema_decay - assert 0. < ema_decay < 1. self.model_ema = LitEma(self, decay=ema_decay) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + logpy.info(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + def get_input(self, batch) -> Any: + raise NotImplementedError() - def init_from_ckpt(self, path, ignore_keys=list()): - if path.lower().endswith(".safetensors"): - import safetensors.torch - sd = safetensors.torch.load_file(path, device="cpu") - else: - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - self.load_state_dict(sd, strict=False) - print(f"Restored from {path}") + def on_train_batch_end(self, *args, **kwargs): + # for EMA computation + if self.use_ema: + self.model_ema(self) @contextmanager def ema_scope(self, context=None): @@ -70,154 +69,159 @@ def ema_scope(self, context=None): self.model_ema.store(self.parameters()) self.model_ema.copy_to(self) if context is not None: - print(f"{context}: Switched to EMA weights") + logpy.info(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.parameters()) if context is not None: - print(f"{context}: Restored training weights") - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - dec = self.decode(z) - return dec, posterior - - def get_input(self, 
batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - - if optimizer_idx == 0: - # train encoder+decoder+logvar - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return aeloss - - if optimizer_idx == 1: - # train the discriminator - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - - self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return discloss - - def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema") - return log_dict - - def _validation_step(self, batch, batch_idx, postfix=""): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, - last_layer=self.get_last_layer(), split="val"+postfix) - - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, - last_layer=self.get_last_layer(), split="val"+postfix) - - self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"]) - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr = self.learning_rate - ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list( - self.quant_conv.parameters()) + list(self.post_quant_conv.parameters()) - if self.learn_logvar: - print(f"{self.__class__.__name__}: Learning logvar") - ae_params_list.append(self.loss.logvar) - opt_ae = torch.optim.Adam(ae_params_list, - lr=lr, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr, betas=(0.5, 0.9)) - return [opt_ae, opt_disc], [] + logpy.info(f"{context}: Restored training weights") + + def encode(self, *args, **kwargs) -> torch.Tensor: + raise NotImplementedError("encode()-method of abstract base class called") + + def decode(self, *args, **kwargs) -> torch.Tensor: + raise NotImplementedError("decode()-method of abstract base class called") + + def instantiate_optimizer_from_config(self, params, lr, cfg): + logpy.info(f"loading >>> {cfg['target']} <<< optimizer from config") + return get_obj_from_str(cfg["target"])( + params, lr=lr, **cfg.get("params", dict()) + ) + + def configure_optimizers(self) -> Any: + raise NotImplementedError() + + +class AutoencodingEngine(AbstractAutoencoder): + """ + Base class for all image autoencoders that we train, like VQGAN or AutoencoderKL + (we also restore them explicitly as special cases for legacy reasons). + Regularizations such as KL or VQ are moved to the regularizer class. 
+ """ + + def __init__( + self, + *args, + encoder_config: Dict, + decoder_config: Dict, + regularizer_config: Dict, + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.encoder: torch.nn.Module = instantiate_from_config(encoder_config) + self.decoder: torch.nn.Module = instantiate_from_config(decoder_config) + self.regularization: AbstractRegularizer = instantiate_from_config( + regularizer_config + ) def get_last_layer(self): - return self.decoder.conv_out.weight - - @torch.no_grad() - def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if not only_inputs: - xrec, posterior = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["samples"] = self.decode(torch.randn_like(posterior.sample())) - log["reconstructions"] = xrec - if log_ema or self.use_ema: - with self.ema_scope(): - xrec_ema, posterior_ema = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec_ema.shape[1] > 3 - xrec_ema = self.to_rgb(xrec_ema) - log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample())) - log["reconstructions_ema"] = xrec_ema - log["inputs"] = x - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. + return self.decoder.get_last_layer() + + def encode( + self, + x: torch.Tensor, + return_reg_log: bool = False, + unregularized: bool = False, + ) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]: + z = self.encoder(x) + if unregularized: + return z, dict() + z, reg_log = self.regularization(z) + if return_reg_log: + return z, reg_log + return z + + def decode(self, z: torch.Tensor, **kwargs) -> torch.Tensor: + x = self.decoder(z, **kwargs) return x + def forward( + self, x: torch.Tensor, **additional_decode_kwargs + ) -> Tuple[torch.Tensor, torch.Tensor, dict]: + z, reg_log = self.encode(x, return_reg_log=True) + dec = self.decode(z, **additional_decode_kwargs) + return z, dec, reg_log + + +class AutoencodingEngineLegacy(AutoencodingEngine): + def __init__(self, embed_dim: int, **kwargs): + self.max_batch_size = kwargs.pop("max_batch_size", None) + ddconfig = kwargs.pop("ddconfig") + super().__init__( + encoder_config={ + "target": "comfy.ldm.modules.diffusionmodules.model.Encoder", + "params": ddconfig, + }, + decoder_config={ + "target": "comfy.ldm.modules.diffusionmodules.model.Decoder", + "params": ddconfig, + }, + **kwargs, + ) + self.quant_conv = torch.nn.Conv2d( + (1 + ddconfig["double_z"]) * ddconfig["z_channels"], + (1 + ddconfig["double_z"]) * embed_dim, + 1, + ) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim -class IdentityFirstStage(torch.nn.Module): - def __init__(self, *args, vq_interface=False, **kwargs): - self.vq_interface = vq_interface - super().__init__() - - def encode(self, x, *args, **kwargs): - return x + def get_autoencoder_params(self) -> list: + params = super().get_autoencoder_params() + return params - def decode(self, x, *args, **kwargs): - return x + def encode( + self, x: torch.Tensor, return_reg_log: bool = False + ) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]: + if self.max_batch_size is None: + z = self.encoder(x) + z = self.quant_conv(z) + else: + 
N = x.shape[0] + bs = self.max_batch_size + n_batches = int(math.ceil(N / bs)) + z = list() + for i_batch in range(n_batches): + z_batch = self.encoder(x[i_batch * bs : (i_batch + 1) * bs]) + z_batch = self.quant_conv(z_batch) + z.append(z_batch) + z = torch.cat(z, 0) + + z, reg_log = self.regularization(z) + if return_reg_log: + return z, reg_log + return z + + def decode(self, z: torch.Tensor, **decoder_kwargs) -> torch.Tensor: + if self.max_batch_size is None: + dec = self.post_quant_conv(z) + dec = self.decoder(dec, **decoder_kwargs) + else: + N = z.shape[0] + bs = self.max_batch_size + n_batches = int(math.ceil(N / bs)) + dec = list() + for i_batch in range(n_batches): + dec_batch = self.post_quant_conv(z[i_batch * bs : (i_batch + 1) * bs]) + dec_batch = self.decoder(dec_batch, **decoder_kwargs) + dec.append(dec_batch) + dec = torch.cat(dec, 0) - def quantize(self, x, *args, **kwargs): - if self.vq_interface: - return x, None, [None, None, None] - return x + return dec - def forward(self, x, *args, **kwargs): - return x +class AutoencoderKL(AutoencodingEngineLegacy): + def __init__(self, **kwargs): + if "lossconfig" in kwargs: + kwargs["loss_config"] = kwargs.pop("lossconfig") + super().__init__( + regularizer_config={ + "target": ( + "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer" + ) + }, + **kwargs, + ) diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index 852d367c9af..f23417fd216 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -541,7 +541,10 @@ class Decoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", **ignorekwargs): + conv_out_op=comfy.ops.Conv2d, + resnet_op=ResnetBlock, + attn_op=AttnBlock, + **ignorekwargs): super().__init__() if use_linear_attn: attn_type = "linear" self.ch = ch @@ -570,12 +573,12 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, # middle self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, + self.mid.block_1 = resnet_op(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, + self.mid.attn_1 = attn_op(block_in) + self.mid.block_2 = resnet_op(in_channels=block_in, out_channels=block_in, temb_channels=self.temb_ch, dropout=dropout) @@ -587,13 +590,13 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn = nn.ModuleList() block_out = ch*ch_mult[i_level] for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, + block.append(resnet_op(in_channels=block_in, out_channels=block_out, temb_channels=self.temb_ch, dropout=dropout)) block_in = block_out if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) + attn.append(attn_op(block_in)) up = nn.Module() up.block = block up.attn = attn @@ -604,13 +607,13 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, # end self.norm_out = Normalize(block_in) - self.conv_out = comfy.ops.Conv2d(block_in, + self.conv_out = conv_out_op(block_in, out_ch, kernel_size=3, stride=1, padding=1) - def forward(self, z): + def forward(self, z, **kwargs): #assert z.shape[1:] == self.z_shape[1:] 
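        # note: the **kwargs added to forward() here are threaded through every
        # resnet block, attention block and the final conv_out call below, so
        # sgm-style wrappers can pass extra per-call state (e.g. decoder_kwargs)
        # without this signature changing again.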
self.last_z_shape = z.shape @@ -621,16 +624,16 @@ def forward(self, z): h = self.conv_in(z) # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) + h = self.mid.block_1(h, temb, **kwargs) + h = self.mid.attn_1(h, **kwargs) + h = self.mid.block_2(h, temb, **kwargs) # upsampling for i_level in reversed(range(self.num_resolutions)): for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) + h = self.up[i_level].block[i_block](h, temb, **kwargs) if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) + h = self.up[i_level].attn[i_block](h, **kwargs) if i_level != 0: h = self.up[i_level].upsample(h) @@ -640,7 +643,7 @@ def forward(self, z): h = self.norm_out(h) h = nonlinearity(h) - h = self.conv_out(h) + h = self.conv_out(h, **kwargs) if self.tanh_out: h = torch.tanh(h) return h diff --git a/comfy/sd.py b/comfy/sd.py index fd8b94df895..48ee5721b48 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -4,7 +4,7 @@ from comfy import model_management from .ldm.util import instantiate_from_config -from .ldm.models.autoencoder import AutoencoderKL +from .ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine import yaml import comfy.utils @@ -140,21 +140,24 @@ def get_key_patches(self): return self.patcher.get_key_patches() class VAE: - def __init__(self, ckpt_path=None, device=None, config=None): + def __init__(self, sd=None, device=None, config=None): + if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format + sd = diffusers_convert.convert_vae_state_dict(sd) + if config is None: #default SD1.x/SD2.x VAE parameters ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} - self.first_stage_model = AutoencoderKL(ddconfig, {'target': 'torch.nn.Identity'}, 4, monitor="val/rec_loss") + self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4) else: self.first_stage_model = AutoencoderKL(**(config['params'])) self.first_stage_model = self.first_stage_model.eval() - if ckpt_path is not None: - sd = comfy.utils.load_torch_file(ckpt_path) - if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format - sd = diffusers_convert.convert_vae_state_dict(sd) - m, u = self.first_stage_model.load_state_dict(sd, strict=False) - if len(m) > 0: - print("Missing VAE keys", m) + + m, u = self.first_stage_model.load_state_dict(sd, strict=False) + if len(m) > 0: + print("Missing VAE keys", m) + + if len(u) > 0: + print("Leftover VAE keys", u) if device is None: device = model_management.vae_device() @@ -183,7 +186,7 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): steps += pixel_samples.shape[0] * comfy.utils.get_tiled_scale_steps(pixel_samples.shape[3], pixel_samples.shape[2], tile_x * 2, tile_y // 2, overlap) pbar = comfy.utils.ProgressBar(steps) - encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).sample().float() + encode_fn = lambda a: self.first_stage_model.encode((2. 
* a - 1.).to(self.vae_dtype).to(self.device)).float() samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) @@ -229,7 +232,7 @@ def encode(self, pixel_samples): samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu") for x in range(0, pixel_samples.shape[0], batch_number): pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device) - samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).sample().cpu().float() + samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).cpu().float() except model_management.OOM_EXCEPTION as e: print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.") @@ -375,10 +378,8 @@ class EmptyClass: model.load_model_weights(state_dict, "model.diffusion_model.") if output_vae: - w = WeightsLoader() - vae = VAE(config=vae_config) - w.first_stage_model = vae.first_stage_model - load_model_weights(w, state_dict) + vae_sd = comfy.utils.state_dict_prefix_replace(state_dict, {"first_stage_model.": ""}, filter_keys=True) + vae = VAE(sd=vae_sd, config=vae_config) if output_clip: w = WeightsLoader() @@ -427,10 +428,8 @@ class WeightsLoader(torch.nn.Module): model.load_model_weights(sd, "model.diffusion_model.") if output_vae: - vae = VAE() - w = WeightsLoader() - w.first_stage_model = vae.first_stage_model - load_model_weights(w, sd) + vae_sd = comfy.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True) + vae = VAE(sd=vae_sd) if output_clip: w = WeightsLoader() diff --git a/comfy/utils.py b/comfy/utils.py index df016ef9e2e..a1807aa1d47 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -47,12 +47,17 @@ def state_dict_key_replace(state_dict, keys_to_replace): state_dict[keys_to_replace[x]] = state_dict.pop(x) return state_dict -def state_dict_prefix_replace(state_dict, replace_prefix): +def state_dict_prefix_replace(state_dict, replace_prefix, filter_keys=False): + if filter_keys: + out = {} + else: + out = state_dict for rp in replace_prefix: replace = list(map(lambda a: (a, "{}{}".format(replace_prefix[rp], a[len(rp):])), filter(lambda a: a.startswith(rp), state_dict.keys()))) for x in replace: - state_dict[x[1]] = state_dict.pop(x[0]) - return state_dict + w = state_dict.pop(x[0]) + out[x[1]] = w + return out def transformers_convert(sd, prefix_from, prefix_to, number): diff --git a/nodes.py b/nodes.py index d0e9ffcc5dc..0dbc2be32fd 100644 --- a/nodes.py +++ b/nodes.py @@ -584,7 +584,8 @@ def INPUT_TYPES(s): #TODO: scale factor? 
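    # note: VAE() is now built from an already-loaded state dict rather than a
    # checkpoint path, so loaders do the file I/O themselves:
    #     sd = comfy.utils.load_torch_file(vae_path)
    #     vae = comfy.sd.VAE(sd=sd)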
def load_vae(self, vae_name): vae_path = folder_paths.get_full_path("vae", vae_name) - vae = comfy.sd.VAE(ckpt_path=vae_path) + sd = comfy.utils.load_torch_file(vae_path) + vae = comfy.sd.VAE(sd=sd) return (vae,) class ControlNetLoader: From 6dbb18df928b97ef7858d8df1bf0bc0003d5f302 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Tue, 17 Oct 2023 17:53:57 -0300 Subject: [PATCH 101/420] Export and import templates --- web/extensions/core/nodeTemplates.js | 146 ++++++++++++++++++++++----- web/scripts/app.js | 44 +++++++- 2 files changed, 164 insertions(+), 26 deletions(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 7059f826d74..118565169db 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -22,6 +22,15 @@ class ManageTemplates extends ComfyDialog { super(); this.element.classList.add("comfy-manage-templates"); this.templates = this.load(); + + this.importInput = $el("input", { + type: "file", + accept: ".json", + multiple: true, + style: {display: "none"}, + parent: document.body, + onchange: () => this.importAll(), + }); } createButtons() { @@ -34,6 +43,22 @@ class ManageTemplates extends ComfyDialog { onclick: () => this.save(), }) ); + btns.unshift( + $el("button", { + type: "button", + textContent: "Export", + onclick: () => this.exportAll(), + }) + ); + btns.unshift( + $el("button", { + type: "button", + textContent: "Import", + onclick: () => { + this.importInput.click(); + }, + }) + ); return btns; } @@ -69,6 +94,50 @@ class ManageTemplates extends ComfyDialog { localStorage.setItem(id, JSON.stringify(this.templates)); } + async importAll() { + for (const file of this.importInput.files) { + if (file.type === "application/json" || file.name.endsWith(".json")) { + const reader = new FileReader(); + reader.onload = async () => { + var importFile = JSON.parse(reader.result); + if (importFile && importFile?.templates) { + for (const template of importFile.templates) { + if (template?.name && template?.data) { + this.templates.push(template); + } + } + this.store(); + } + }; + await reader.readAsText(file); + } + } + + this.close(); + } + + exportAll() { + if (this.templates.length == 0) { + alert("No templates to export."); + return; + } + + const json = JSON.stringify({templates: this.templates}, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: "node_templates.json", + style: {display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + } + show() { // Show list of template names + delete button super.show( @@ -97,19 +166,48 @@ class ManageTemplates extends ComfyDialog { }), ] ), - $el("button", { - textContent: "Delete", - style: { - fontSize: "12px", - color: "red", - fontWeight: "normal", - }, - onclick: (e) => { - nameInput.value = ""; - e.target.style.display = "none"; - e.target.previousElementSibling.style.display = "none"; - }, - }), + $el( + "div", + {}, + [ + $el("button", { + textContent: "Export", + style: { + fontSize: "12px", + fontWeight: "normal", + }, + onclick: (e) => { + const json = JSON.stringify({templates: [t]}, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: t.name + ".json", + style: 
{display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + }, + }), + $el("button", { + textContent: "Delete", + style: { + fontSize: "12px", + color: "red", + fontWeight: "normal", + }, + onclick: (e) => { + nameInput.value = ""; + e.target.parentElement.style.display = "none"; + e.target.parentElement.previousElementSibling.style.display = "none"; + }, + }), + ] + ), ]; }) ) @@ -164,19 +262,17 @@ app.registerExtension({ }, })); - if (subItems.length) { - subItems.push(null, { - content: "Manage", - callback: () => manage.show(), - }); + subItems.push(null, { + content: "Manage", + callback: () => manage.show(), + }); - options.push({ - content: "Node Templates", - submenu: { - options: subItems, - }, - }); - } + options.push({ + content: "Node Templates", + submenu: { + options: subItems, + }, + }); return options; }; diff --git a/web/scripts/app.js b/web/scripts/app.js index 1a07d69bcf7..acbd30b2d42 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1416,6 +1416,43 @@ export class ComfyApp { } } + loadTemplateData(templateData) { + if (!templateData?.templates) { + return; + } + + const old = localStorage.getItem("litegrapheditor_clipboard"); + + var maxY, nodeBottom, node; + + for (const template of templateData.templates) { + if (!template?.data) { + continue; + } + + localStorage.setItem("litegrapheditor_clipboard", template.data); + app.canvas.pasteFromClipboard(); + + // Move mouse position down to paste the next template below + + maxY = false; + + for (const i in app.canvas.selected_nodes) { + node = app.canvas.selected_nodes[i]; + + nodeBottom = node.pos[1] + node.size[1]; + + if (maxY === false || nodeBottom > maxY) { + maxY = nodeBottom; + } + } + + app.canvas.graph_mouse[1] = maxY + 50; + } + + localStorage.setItem("litegrapheditor_clipboard", old); + } + /** * Populates the graph with the specified workflow data * @param {*} graphData A serialized graph object @@ -1756,7 +1793,12 @@ export class ComfyApp { } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); reader.onload = () => { - this.loadGraphData(JSON.parse(reader.result)); + var jsonContent = JSON.parse(reader.result); + if (jsonContent?.templates) { + this.loadTemplateData(jsonContent); + } else { + this.loadGraphData(jsonContent); + } }; reader.readAsText(file); } else if (file.name?.endsWith(".latent") || file.name?.endsWith(".safetensors")) { From a5550747370984714caa859c1c58cd77f43f9008 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Tue, 17 Oct 2023 19:44:26 -0300 Subject: [PATCH 102/420] Use name from input to export single node template --- web/extensions/core/nodeTemplates.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 118565169db..92d57f9d44d 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -182,7 +182,7 @@ class ManageTemplates extends ComfyDialog { const url = URL.createObjectURL(blob); const a = $el("a", { href: url, - download: t.name + ".json", + download: (nameInput.value || t.name) + ".json", style: {display: "none"}, parent: document.body, }); From c2bb34d865a4c512091e6036d421dd26d0889b25 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 02:04:41 -0400 Subject: [PATCH 103/420] Implement updated FreeU as _for_testing->FreeU_V2 node --- comfy_extras/nodes_freelunch.py | 46 
+++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index 07a88bd9614..7512b841d74 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -61,7 +61,53 @@ def output_block_patch(h, hsp, transformer_options): m.set_model_output_block_patch(output_block_patch) return (m, ) +class FreeU_V2: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "b1": ("FLOAT", {"default": 1.3, "min": 0.0, "max": 10.0, "step": 0.01}), + "b2": ("FLOAT", {"default": 1.4, "min": 0.0, "max": 10.0, "step": 0.01}), + "s1": ("FLOAT", {"default": 0.9, "min": 0.0, "max": 10.0, "step": 0.01}), + "s2": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 10.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, b1, b2, s1, s2): + model_channels = model.model.model_config.unet_config["model_channels"] + scale_dict = {model_channels * 4: (b1, s1), model_channels * 2: (b2, s2)} + on_cpu_devices = {} + + def output_block_patch(h, hsp, transformer_options): + scale = scale_dict.get(h.shape[1], None) + if scale is not None: + hidden_mean = h.mean(1).unsqueeze(1) + B = hidden_mean.shape[0] + hidden_max, _ = torch.max(hidden_mean.view(B, -1), dim=-1, keepdim=True) + hidden_min, _ = torch.min(hidden_mean.view(B, -1), dim=-1, keepdim=True) + hidden_mean = (hidden_mean - hidden_min.unsqueeze(2).unsqueeze(3)) / (hidden_max - hidden_min).unsqueeze(2).unsqueeze(3) + + h[:,:h.shape[1] // 2] = h[:,:h.shape[1] // 2] * ((scale[0] - 1 ) * hidden_mean + 1) + + if hsp.device not in on_cpu_devices: + try: + hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) + except: + print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.") + on_cpu_devices[hsp.device] = True + hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) + else: + hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) + + return h, hsp + + m = model.clone() + m.set_model_output_block_patch(output_block_patch) + return (m, ) NODE_CLASS_MAPPINGS = { "FreeU": FreeU, + "FreeU_V2": FreeU_V2, } From 0d45a565daeb8e828680b00770c0e2e03f9955c2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 02:43:01 -0400 Subject: [PATCH 104/420] Fix memory issue related to control loras. The cleanup function was not getting called. 
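Control models attached to the conds (control loras in particular) allocate
weights during sampling and free them in their cleanup() method, so skipping
that call leaks memory across runs. The fix gathers every control model
referenced by the positive and negative conds, deduplicates them with set()
(the same object is usually attached to both), and runs cleanup on each,
roughly:

    models = set(get_models_from_cond(positive, "control") +
                 get_models_from_cond(negative, "control"))
    cleanup_additional_models(models)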
--- comfy/sample.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/sample.py b/comfy/sample.py index 322272766f0..e6a69973d93 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -98,6 +98,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative samples = samples.cpu() cleanup_additional_models(models) + cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))) return samples def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=None, callback=None, disable_pbar=False, seed=None): @@ -109,5 +110,6 @@ def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent samples = comfy.samplers.sample(real_model, noise, positive_copy, negative_copy, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) samples = samples.cpu() cleanup_additional_models(models) + cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))) return samples From 782a24fce65272649191635ce43e3bec5e09c5e2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 16:48:37 -0400 Subject: [PATCH 105/420] Refactor cond_concat into model object. --- comfy/model_base.py | 34 +++++++++++++++++++++++++++++++++- comfy/samplers.py | 28 ++++------------------------ 2 files changed, 37 insertions(+), 25 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index ed2dc83e4e0..8e704022ed8 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -26,6 +26,7 @@ def __init__(self, model_config, model_type=ModelType.EPS, device=None): self.adm_channels = unet_config.get("adm_in_channels", None) if self.adm_channels is None: self.adm_channels = 0 + self.inpaint_model = False print("model_type", model_type.name) print("adm", self.adm_channels) @@ -71,6 +72,37 @@ def is_adm(self): def encode_adm(self, **kwargs): return None + def cond_concat(self, **kwargs): + if self.inpaint_model: + concat_keys = ("mask", "masked_image") + cond_concat = [] + denoise_mask = kwargs.get("denoise_mask", None) + latent_image = kwargs.get("latent_image", None) + noise = kwargs.get("noise", None) + + def blank_inpaint_image_like(latent_image): + blank_image = torch.ones_like(latent_image) + # these are the values for "zero" in pixel space translated to latent space + blank_image[:,0] *= 0.8223 + blank_image[:,1] *= -0.6876 + blank_image[:,2] *= 0.6364 + blank_image[:,3] *= 0.1380 + return blank_image + + for ck in concat_keys: + if denoise_mask is not None: + if ck == "mask": + cond_concat.append(denoise_mask[:,:1]) + elif ck == "masked_image": + cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space + else: + if ck == "mask": + cond_concat.append(torch.ones_like(noise)[:,:1]) + elif ck == "masked_image": + cond_concat.append(blank_inpaint_image_like(noise)) + return cond_concat + return None + def load_model_weights(self, sd, unet_prefix=""): to_load = {} keys = list(sd.keys()) @@ -112,7 +144,7 @@ def state_dict_for_saving(self, clip_state_dict, vae_state_dict): return {**unet_state_dict, **vae_state_dict, **clip_state_dict} def set_inpaint(self): - self.concat_keys = ("mask", "masked_image") + self.inpaint_model = True def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0): adm_inputs = [] diff --git 
a/comfy/samplers.py b/comfy/samplers.py index e43f7a6fe74..bb8bfdfa492 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -358,15 +358,6 @@ def sgm_scheduler(model, steps): sigs += [0.0] return torch.FloatTensor(sigs) -def blank_inpaint_image_like(latent_image): - blank_image = torch.ones_like(latent_image) - # these are the values for "zero" in pixel space translated to latent space - blank_image[:,0] *= 0.8223 - blank_image[:,1] *= -0.6876 - blank_image[:,2] *= 0.6364 - blank_image[:,3] *= 0.1380 - return blank_image - def get_mask_aabb(masks): if masks.numel() == 0: return torch.zeros((0, 4), device=masks.device, dtype=torch.int) @@ -671,21 +662,10 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} - cond_concat = None - if hasattr(model, 'concat_keys'): #inpaint - cond_concat = [] - for ck in model.concat_keys: - if denoise_mask is not None: - if ck == "mask": - cond_concat.append(denoise_mask[:,:1]) - elif ck == "masked_image": - cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space - else: - if ck == "mask": - cond_concat.append(torch.ones_like(noise)[:,:1]) - elif ck == "masked_image": - cond_concat.append(blank_inpaint_image_like(noise)) - extra_args["cond_concat"] = cond_concat + if hasattr(model, 'cond_concat'): + cond_concat = model.cond_concat(noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + if cond_concat is not None: + extra_args["cond_concat"] = cond_concat samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) return model.process_latent_out(samples.to(torch.float32)) From 430a8334c500e00fb3b222082c6018cfcbc938aa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 19:48:36 -0400 Subject: [PATCH 106/420] Fix some potential issues. 
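Both hunks below follow the same defensive pattern: detect the checkpoint
type by probing the state dict for an architecture-specific key, return
None when nothing matches, and guard the caller against the None instead
of misloading weights. A rough sketch of the detection idea, using the
probe keys visible in the clip_vision hunk (detect_clip_vision_config is
a hypothetical name; the ViT-G probe sits just above the shown context):

    import os

    def detect_clip_vision_config(sd, base_dir="."):
        if "vision_model.encoder.layers.30.layer_norm1.weight" in sd:
            name = "clip_vision_config_h.json"
        elif "vision_model.encoder.layers.22.layer_norm1.weight" in sd:
            name = "clip_vision_config_vitl.json"
        else:
            return None  # unrecognized checkpoint: the caller must handle it
        return os.path.join(base_dir, name)

    assert detect_clip_vision_config({"some.unrelated.key": 0}) is None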
--- comfy/clip_vision.py | 5 ++++- comfy/sd.py | 9 +++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 1206c680d61..e085186ef68 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -92,8 +92,11 @@ def load_clipvision_from_sd(sd, prefix="", convert_keys=False): json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_g.json") elif "vision_model.encoder.layers.30.layer_norm1.weight" in sd: json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_h.json") - else: + elif "vision_model.encoder.layers.22.layer_norm1.weight" in sd: json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_vision_config_vitl.json") + else: + return None + clip = ClipVisionModel(json_config) m, u = clip.load_sd(sd) if len(m) > 0: diff --git a/comfy/sd.py b/comfy/sd.py index 48ee5721b48..c364b723cb9 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -434,10 +434,11 @@ class WeightsLoader(torch.nn.Module): if output_clip: w = WeightsLoader() clip_target = model_config.clip_target() - clip = CLIP(clip_target, embedding_directory=embedding_directory) - w.cond_stage_model = clip.cond_stage_model - sd = model_config.process_clip_state_dict(sd) - load_model_weights(w, sd) + if clip_target is not None: + clip = CLIP(clip_target, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model + sd = model_config.process_clip_state_dict(sd) + load_model_weights(w, sd) left_over = sd.keys() if len(left_over) > 0: From 45c972aba8cf95b229385bb58193d25fb77bccaa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 18 Oct 2023 20:36:37 -0400 Subject: [PATCH 107/420] Refactor cond_concat into conditioning. 
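After this refactor each conditioning entry can carry its own "concat"
tensors (keyed under cond[1]["concat"]); the sampler crops them to that
cond's area and joins them along the channel dimension, instead of
receiving one global cond_concat list. A standalone sketch of the
cropping step, with illustrative shapes:

    import torch

    def crop_concat(cond_concat, area):
        # area = (height, width, y_offset, x_offset), matching the sampler's layout
        h, w, y, x = area
        cropped = [t[:, :, y:y + h, x:x + w] for t in cond_concat]
        return torch.cat(cropped, dim=1)  # e.g. 1-channel mask + 4-channel masked latent

    mask = torch.ones(1, 1, 64, 64)
    masked_latent = torch.zeros(1, 4, 64, 64)
    print(crop_concat([mask, masked_latent], (32, 32, 0, 0)).shape)  # [1, 5, 32, 32]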
--- comfy/samplers.py | 61 +++++++++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index bb8bfdfa492..a56599227ca 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -14,8 +14,8 @@ def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) #The main sampling function shared by all the samplers #Returns predicted noise -def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, cond_concat=None, model_options={}, seed=None): - def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in): +def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): + def get_area_and_mult(cond, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) strength = 1.0 if 'timestep_start' in cond[1]: @@ -68,12 +68,15 @@ def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in): conditionning = {} conditionning['c_crossattn'] = cond[0] - if cond_concat_in is not None and len(cond_concat_in) > 0: - cropped = [] - for x in cond_concat_in: - cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - cropped.append(cr) - conditionning['c_concat'] = torch.cat(cropped, dim=1) + + if 'concat' in cond[1]: + cond_concat_in = cond[1]['concat'] + if cond_concat_in is not None and len(cond_concat_in) > 0: + cropped = [] + for x in cond_concat_in: + cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] + cropped.append(cr) + conditionning['c_concat'] = torch.cat(cropped, dim=1) if adm_cond is not None: conditionning['c_adm'] = adm_cond @@ -173,7 +176,7 @@ def cond_cat(c_list): out['c_adm'] = torch.cat(c_adm) return out - def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, cond_concat_in, model_options): + def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) out_count = torch.ones_like(x_in)/100000.0 @@ -185,14 +188,14 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot to_run = [] for x in cond: - p = get_area_and_mult(x, x_in, cond_concat_in, timestep) + p = get_area_and_mult(x, x_in, timestep) if p is None: continue to_run += [(p, COND)] if uncond is not None: for x in uncond: - p = get_area_and_mult(x, x_in, cond_concat_in, timestep) + p = get_area_and_mult(x, x_in, timestep) if p is None: continue @@ -286,7 +289,7 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot if math.isclose(cond_scale, 1.0): uncond = None - cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options) + cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: args = {"cond": cond, "uncond": uncond, "cond_scale": cond_scale, "timestep": timestep} return model_options["sampler_cfg_function"](args) @@ -307,8 +310,8 @@ def __init__(self, model): super().__init__() self.inner_model = model self.alphas_cumprod = model.alphas_cumprod - def apply_model(self, x, timestep, cond, uncond, cond_scale, cond_concat=None, model_options={}, seed=None): - out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed) + def apply_model(self, x, timestep, cond, uncond, cond_scale, model_options={}, seed=None): + out = 
sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) return out @@ -316,11 +319,11 @@ class KSamplerX0Inpaint(torch.nn.Module): def __init__(self, model): super().__init__() self.inner_model = model - def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, cond_concat=None, model_options={}, seed=None): + def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, model_options={}, seed=None): if denoise_mask is not None: latent_mask = 1. - denoise_mask x = x * denoise_mask + (self.latent_image + self.noise * sigma.reshape([sigma.shape[0]] + [1] * (len(self.noise.shape) - 1))) * latent_mask - out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options, seed=seed) + out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, model_options=model_options, seed=seed) if denoise_mask is not None: out *= denoise_mask @@ -534,6 +537,19 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type): return conds +def encode_cond(model_function, key, conds, **kwargs): + for t in range(len(conds)): + x = conds[t] + params = x[1].copy() + for k in kwargs: + if k not in params: + params[k] = kwargs[k] + + out = model_function(**params) + if out is not None: + x[1] = x[1].copy() + x[1][key] = out + return conds class Sampler: def sample(self): @@ -653,20 +669,19 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) + if latent_image is not None: + latent_image = model.process_latent_in(latent_image) + if model.is_adm(): positive = encode_adm(model, positive, noise.shape[0], noise.shape[3], noise.shape[2], device, "positive") negative = encode_adm(model, negative, noise.shape[0], noise.shape[3], noise.shape[2], device, "negative") - if latent_image is not None: - latent_image = model.process_latent_in(latent_image) + if hasattr(model, 'cond_concat'): + positive = encode_cond(model.cond_concat, "concat", positive, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + negative = encode_cond(model.cond_concat, "concat", negative, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} - if hasattr(model, 'cond_concat'): - cond_concat = model.cond_concat(noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) - if cond_concat is not None: - extra_args["cond_concat"] = cond_concat - samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) return model.process_latent_out(samples.to(torch.float32)) From e6962120c6b6e36b3c87670a988ee825abba8dbe Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 19 Oct 2023 01:10:41 -0400 Subject: [PATCH 108/420] Make sure cond_concat is on the right device. 
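The inpaint concat tensors (denoise mask, masked latent) can be built on
the CPU while the UNet runs on the GPU, so encode_cond now threads the
sampling device through and cond_concat moves the tensors onto it
explicitly. A toy reproduction of the mismatch this guards against
(shapes are illustrative):

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    noise = torch.randn(1, 4, 64, 64, device=device)
    denoise_mask = torch.ones(1, 1, 64, 64)  # typically created on the CPU

    # Without the explicit .to(device), this cat raises
    # "Expected all tensors to be on the same device" on CUDA machines.
    c_concat = torch.cat([denoise_mask.to(device), noise], dim=1)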
--- comfy/model_base.py | 5 +++-- comfy/samplers.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 8e704022ed8..cda6765e43a 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -79,6 +79,7 @@ def cond_concat(self, **kwargs): denoise_mask = kwargs.get("denoise_mask", None) latent_image = kwargs.get("latent_image", None) noise = kwargs.get("noise", None) + device = kwargs["device"] def blank_inpaint_image_like(latent_image): blank_image = torch.ones_like(latent_image) @@ -92,9 +93,9 @@ def blank_inpaint_image_like(latent_image): for ck in concat_keys: if denoise_mask is not None: if ck == "mask": - cond_concat.append(denoise_mask[:,:1]) + cond_concat.append(denoise_mask[:,:1].to(device)) elif ck == "masked_image": - cond_concat.append(latent_image) #NOTE: the latent_image should be masked by the mask in pixel space + cond_concat.append(latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space else: if ck == "mask": cond_concat.append(torch.ones_like(noise)[:,:1]) diff --git a/comfy/samplers.py b/comfy/samplers.py index a56599227ca..4840b6d9f4e 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -537,10 +537,11 @@ def encode_adm(model, conds, batch_size, width, height, device, prompt_type): return conds -def encode_cond(model_function, key, conds, **kwargs): +def encode_cond(model_function, key, conds, device, **kwargs): for t in range(len(conds)): x = conds[t] params = x[1].copy() + params["device"] = device for k in kwargs: if k not in params: params[k] = kwargs[k] @@ -677,8 +678,8 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model negative = encode_adm(model, negative, noise.shape[0], noise.shape[3], noise.shape[2], device, "negative") if hasattr(model, 'cond_concat'): - positive = encode_cond(model.cond_concat, "concat", positive, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) - negative = encode_cond(model.cond_concat, "concat", negative, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + positive = encode_cond(model.cond_concat, "concat", positive, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + negative = encode_cond(model.cond_concat, "concat", negative, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} From f1062be622eab6f989d2020f2c96cfff4f53c724 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" Date: Fri, 20 Oct 2023 00:07:08 +0900 Subject: [PATCH 109/420] fix: Fixing intermittent crashes with undefined graphs in the Firefox browser. --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 1a07d69bcf7..aadc7d3de8d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -492,7 +492,7 @@ export class ComfyApp { } if (this.imgs && this.imgs.length) { - const canvas = graph.list_of_graphcanvas[0]; + const canvas = app.graph.list_of_graphcanvas[0]; const mouse = canvas.graph_mouse; if (!canvas.pointer_is_down && this.pointerDown) { if (mouse[0] === this.pointerDown.pos[0] && mouse[1] === this.pointerDown.pos[1]) { From 4185324a1d0da3dd9d80e091361d9c218daab007 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 04:03:07 -0400 Subject: [PATCH 110/420] Fix uni_pc sampler math. This changes the images this sampler produces. 
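Instead of mapping k-diffusion sigmas back to discrete timesteps, the
sampler now works in sigma space directly through the new SigmaConvert
schedule: alpha(sigma) = 1/sqrt(sigma^2 + 1), std(sigma) =
sigma/sqrt(sigma^2 + 1), and therefore lambda(sigma) = log(alpha) -
log(std) = -log(sigma). A quick numerical check of those identities
(plain torch, no ComfyUI imports):

    import torch

    sigma = torch.tensor([0.1, 1.0, 14.6])
    log_alpha = 0.5 * torch.log(1 / (sigma * sigma + 1))  # marginal_log_mean_coeff
    alpha = torch.exp(log_alpha)
    std = torch.sqrt(1.0 - alpha * alpha)
    lam = log_alpha - torch.log(std)

    assert torch.allclose(alpha, 1 / torch.sqrt(sigma ** 2 + 1))
    assert torch.allclose(std, sigma / torch.sqrt(sigma ** 2 + 1))
    assert torch.allclose(lam, -torch.log(sigma), atol=1e-5)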
--- comfy/extra_samplers/uni_pc.py | 51 +++++++++++++++++++--------------- comfy/k_diffusion/external.py | 4 +++ comfy/samplers.py | 2 +- 3 files changed, 33 insertions(+), 24 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 7e88bb9fa1b..58e030d0439 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -713,8 +713,8 @@ def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='tim method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', atol=0.0078, rtol=0.05, corrector=False, callback=None, disable_pbar=False ): - t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end - t_T = self.noise_schedule.T if t_start is None else t_start + # t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + # t_T = self.noise_schedule.T if t_start is None else t_start device = x.device steps = len(timesteps) - 1 if method == 'multistep': @@ -769,8 +769,8 @@ def sample(self, x, timesteps, t_start=None, t_end=None, order=3, skip_type='tim callback(step_index, model_prev_list[-1], x, steps) else: raise NotImplementedError() - if denoise_to_zero: - x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + # if denoise_to_zero: + # x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) return x @@ -833,21 +833,33 @@ def expand_dims(v, dims): return v[(...,) + (None,)*(dims - 1)] +class SigmaConvert: + schedule = "" + def marginal_log_mean_coeff(self, sigma): + return 0.5 * torch.log(1 / ((sigma * sigma) + 1)) + + def marginal_alpha(self, t): + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. 
* log_mean_coeff)) + return log_mean_coeff - log_std def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): - to_zero = False + timesteps = sigmas.clone() if sigmas[-1] == 0: - timesteps = torch.nn.functional.interpolate(sigmas[None,None,:-1], size=(len(sigmas),), mode='linear')[0][0] - to_zero = True + timesteps = sigmas[:] + timesteps[-1] = 0.001 else: timesteps = sigmas.clone() - - alphas_cumprod = model.inner_model.alphas_cumprod - - for s in range(timesteps.shape[0]): - timesteps[s] = (model.sigma_to_discrete_timestep(timesteps[s]) / 1000) + (1 / len(alphas_cumprod)) - - ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + ns = SigmaConvert() if image is not None: img = image * ns.marginal_alpha(timesteps[0]) @@ -859,16 +871,10 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex else: img = noise - if to_zero: - timesteps[-1] = (1 / len(alphas_cumprod)) - - device = noise.device - - model_type = "noise" model_fn = model_wrapper( - model.predict_eps_discrete_timestep, + model.predict_eps_sigma, ns, model_type=model_type, guidance_type="uncond", @@ -878,6 +884,5 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex order = min(3, len(timesteps) - 1) uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant) x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback, disable_pbar=disable) - if not to_zero: - x /= ns.marginal_alpha(timesteps[-1]) + x /= ns.marginal_alpha(timesteps[-1]) return x diff --git a/comfy/k_diffusion/external.py b/comfy/k_diffusion/external.py index c1a137d9c0c..953d3db2c9f 100644 --- a/comfy/k_diffusion/external.py +++ b/comfy/k_diffusion/external.py @@ -97,6 +97,10 @@ def predict_eps_discrete_timestep(self, input, t, **kwargs): input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) + def predict_eps_sigma(self, input, sigma, **kwargs): + input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) + return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) + class DiscreteEpsDDPMDenoiser(DiscreteSchedule): """A wrapper for discrete schedule DDPM models that output eps (the predicted noise).""" diff --git a/comfy/samplers.py b/comfy/samplers.py index 4840b6d9f4e..0b38fbd1e86 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -739,7 +739,7 @@ def calculate_sigmas(self, steps): sigmas = None discard_penultimate_sigma = False - if self.sampler in ['dpm_2', 'dpm_2_ancestral']: + if self.sampler in ['dpm_2', 'dpm_2_ancestral', 'uni_pc', 'uni_pc_bh2']: steps += 1 discard_penultimate_sigma = True From 484bfe46c21cf108a687b579354b5996f867a4f7 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Fri, 20 Oct 2023 15:19:29 -0300 Subject: [PATCH 111/420] Clear importInput after import so change event works with same file --- web/extensions/core/nodeTemplates.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 92d57f9d44d..434491075c3 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -113,6 +113,8 @@ class ManageTemplates extends ComfyDialog { } } + 
this.importInput.value = null; + this.close(); } From 5818ca83a243430e6141ce0e7c1096b4ac83d392 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 21 Oct 2023 03:49:04 +0100 Subject: [PATCH 112/420] Unit tests + widget input fixes (#1760) * setup ui unit tests * Refactoring, adding connections * Few tweaks * Fix type * Add general test * Refactored and extended test * move to describe * for groups * Add test for converted widgets on missing nodes + fix crash * tidy * mores tests + refactor * throw earlier to get less confusing error * support outputs * more test * add ci action * use lts node * Fix? * Prevent connecting non matching combos * update * accidently removed npm i * Disable logging extension * added step to generate object_info * fix python * install python * install deps * fix cwd? * logging * Fix double resolve * create dir * update pkg --- .github/workflows/test-ui.yaml | 25 + .gitignore | 1 + tests-ui/.gitignore | 1 + tests-ui/babel.config.json | 3 + tests-ui/globalSetup.js | 14 + tests-ui/jest.config.js | 9 + tests-ui/package-lock.json | 5566 +++++++++++++++++++++++++++ tests-ui/package.json | 30 + tests-ui/setup.js | 87 + tests-ui/tests/widgetInputs.test.js | 319 ++ tests-ui/utils/ezgraph.js | 417 ++ tests-ui/utils/index.js | 71 + tests-ui/utils/litegraph.js | 36 + tests-ui/utils/nopProxy.js | 6 + tests-ui/utils/setup.js | 45 + web/extensions/core/widgetInputs.js | 68 +- 16 files changed, 6680 insertions(+), 18 deletions(-) create mode 100644 .github/workflows/test-ui.yaml create mode 100644 tests-ui/.gitignore create mode 100644 tests-ui/babel.config.json create mode 100644 tests-ui/globalSetup.js create mode 100644 tests-ui/jest.config.js create mode 100644 tests-ui/package-lock.json create mode 100644 tests-ui/package.json create mode 100644 tests-ui/setup.js create mode 100644 tests-ui/tests/widgetInputs.test.js create mode 100644 tests-ui/utils/ezgraph.js create mode 100644 tests-ui/utils/index.js create mode 100644 tests-ui/utils/litegraph.js create mode 100644 tests-ui/utils/nopProxy.js create mode 100644 tests-ui/utils/setup.js diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml new file mode 100644 index 00000000000..62b4c35f658 --- /dev/null +++ b/.github/workflows/test-ui.yaml @@ -0,0 +1,25 @@ +name: Tests CI + +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v3 + with: + node-version: 18 + - uses: actions/setup-python@v4 + with: + python-version: '3.10' + - name: Install requirements + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + - name: Run Tests + run: | + npm install + npm run test:generate + npm test + working-directory: ./tests-ui diff --git a/.gitignore b/.gitignore index 98d91318d3d..43c038e4161 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ venv/ /web/extensions/* !/web/extensions/logging.js.example !/web/extensions/core/ +/tests-ui/data/object_info.json \ No newline at end of file diff --git a/tests-ui/.gitignore b/tests-ui/.gitignore new file mode 100644 index 00000000000..b512c09d476 --- /dev/null +++ b/tests-ui/.gitignore @@ -0,0 +1 @@ +node_modules \ No newline at end of file diff --git a/tests-ui/babel.config.json b/tests-ui/babel.config.json new file mode 100644 index 00000000000..526ddfd8df1 --- /dev/null +++ b/tests-ui/babel.config.json @@ -0,0 +1,3 @@ +{ + "presets": ["@babel/preset-env"] +} diff --git a/tests-ui/globalSetup.js 
b/tests-ui/globalSetup.js new file mode 100644 index 00000000000..b9d97f58a96 --- /dev/null +++ b/tests-ui/globalSetup.js @@ -0,0 +1,14 @@ +module.exports = async function () { + global.ResizeObserver = class ResizeObserver { + observe() {} + unobserve() {} + disconnect() {} + }; + + const { nop } = require("./utils/nopProxy"); + global.enableWebGLCanvas = nop; + + HTMLCanvasElement.prototype.getContext = nop; + + localStorage["Comfy.Settings.Comfy.Logging.Enabled"] = "false"; +}; diff --git a/tests-ui/jest.config.js b/tests-ui/jest.config.js new file mode 100644 index 00000000000..b5a5d646da7 --- /dev/null +++ b/tests-ui/jest.config.js @@ -0,0 +1,9 @@ +/** @type {import('jest').Config} */ +const config = { + testEnvironment: "jsdom", + setupFiles: ["./globalSetup.js"], + clearMocks: true, + resetModules: true, +}; + +module.exports = config; diff --git a/tests-ui/package-lock.json b/tests-ui/package-lock.json new file mode 100644 index 00000000000..35911cd7ffd --- /dev/null +++ b/tests-ui/package-lock.json @@ -0,0 +1,5566 @@ +{ + "name": "comfui-tests", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "comfui-tests", + "version": "1.0.0", + "license": "GPL-3.0", + "devDependencies": { + "@babel/preset-env": "^7.22.20", + "@types/jest": "^29.5.5", + "jest": "^29.7.0", + "jest-environment-jsdom": "^29.7.0" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz", + "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==", + "dev": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.0", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.22.13", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", + "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", + "dev": true, + "dependencies": { + "@babel/highlight": "^7.22.13", + "chalk": "^2.4.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/code-frame/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/code-frame/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + 
"integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/code-frame/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/code-frame/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/code-frame/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.20.tgz", + "integrity": "sha512-BQYjKbpXjoXwFW5jGqiizJQQT/aC7pFm9Ok1OWssonuguICi264lbgMzRp2ZMmRSlfkX6DsWDDcsrctK8Rwfiw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.0.tgz", + "integrity": "sha512-97z/ju/Jy1rZmDxybphrBuI+jtJjFVoz7Mr9yUQVVVi+DNZE333uFQeMOqcCIy1x3WYBIbWftUSLmbNXNT7qFQ==", + "dev": true, + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helpers": "^7.23.0", + "@babel/parser": "^7.23.0", + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.0", + "@babel/types": "^7.23.0", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", + "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", + "dev": true, + "dependencies": { + "@babel/types": "^7.23.0", + "@jridgewell/gen-mapping": "^0.3.2", + "@jridgewell/trace-mapping": "^0.3.17", + "jsesc": "^2.5.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { + "version": "7.22.15", + "resolved": 
"https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz", + "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz", + "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.9", + "@babel/helper-validator-option": "^7.22.15", + "browserslist": "^4.21.9", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.15.tgz", + "integrity": "sha512-jKkwA59IXcvSaiK2UN45kKwSC9o+KuoXsBDvHvU/7BecYIp8GQ2UwrVvFgJASUT+hBnwJx6MhvMCuMzwZZ7jlg==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-member-expression-to-functions": "^7.22.15", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz", + "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.2.tgz", + "integrity": "sha512-k0qnnOqHn5dK9pZpfD5XXZ9SojAITdCKRn2Lp6rnDGzIbaP0rHyMPk/4wsSxVBVz4RfN0q6VpXWP2pDGIoQ7hw==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + "lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-environment-visitor": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", + "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-function-name": { + "version": "7.23.0", + "resolved": 
"https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", + "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", + "dev": true, + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-hoist-variables": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", + "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz", + "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==", + "dev": true, + "dependencies": { + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz", + "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz", + "integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-simple-access": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz", + "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz", + "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", 
+ "@babel/helper-wrap-function": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz", + "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-member-expression-to-functions": "^7.22.15", + "@babel/helper-optimise-call-expression": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-simple-access": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz", + "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-split-export-declaration": { + "version": "7.22.6", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", + "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "dev": true, + "dependencies": { + "@babel/types": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", + "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", + "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz", + "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz", + "integrity": "sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw==", + "dev": true, + "dependencies": { + "@babel/helper-function-name": "^7.22.5", + "@babel/template": "^7.22.15", + 
"@babel/types": "^7.22.19" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.23.1", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.1.tgz", + "integrity": "sha512-chNpneuK18yW5Oxsr+t553UZzzAs3aZnFm4bxhebsNTeshrC95yA7l5yl7GBAG+JG1rF0F7zzD2EixK9mWSDoA==", + "dev": true, + "dependencies": { + "@babel/template": "^7.22.15", + "@babel/traverse": "^7.23.0", + "@babel/types": "^7.23.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", + "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", + "dev": true, + "dependencies": { + "@babel/helper-validator-identifier": "^7.22.20", + "chalk": "^2.4.2", + "js-tokens": "^4.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/highlight/node_modules/ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "dev": true, + "dependencies": { + "color-convert": "^1.9.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/chalk": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", + "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "dev": true, + "dependencies": { + "color-name": "1.1.3" + } + }, + "node_modules/@babel/highlight/node_modules/color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", + "dev": true + }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/@babel/highlight/node_modules/has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/highlight/node_modules/supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "dev": true, + "dependencies": { + "has-flag": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/@babel/parser": { + "version": "7.23.0", + 
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", + "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", + "dev": true, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.15.tgz", + "integrity": "sha512-FB9iYlz7rURmRJyXRKEnalYPPdn87H5no108cyuQQyMwlpJ2SJtpIUBI27kdTin956pz+LPypkPVPUTlxOmrsg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.15.tgz", + "integrity": "sha512-Hyph9LseGvAeeXzikV88bczhsrLrIZqDPxO+sSmAunMPaGrBGhfMWzCPYTtiW9t+HzSE2wtV8e5cc5P6r1xMDQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "dev": true, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-async-generators": { + "version": "7.8.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", + "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-bigint": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz", + "integrity": "sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-class-properties": { + "version": "7.12.13", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", + "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.12.13" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-syntax-class-static-block": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", + "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", + "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-export-namespace-from": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", + "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz", + "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz", + "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", + "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz", + "integrity": 
"sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-logical-assignment-operators": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", + "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-numeric-separator": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", + "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-object-rest-spread": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", + "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-catch-binding": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", + "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.8.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-private-property-in-object": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", + "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", + "dev": true, + "dependencies": { + 
"@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-top-level-await": { + "version": "7.14.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", + "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz", + "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz", + "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.15.tgz", + "integrity": "sha512-jBm1Es25Y+tVoTi5rfd5t1KLmL8ogLKpXszboWOTTtGFGz2RKnQe2yn7HbZ+kb/B8N0FVSGQo874NSlOU1T4+w==", + "dev": true, + "dependencies": { + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.9", + "@babel/plugin-syntax-async-generators": "^7.8.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz", + "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-imports": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-remap-async-to-generator": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz", + "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.0.tgz", + "integrity": "sha512-cOsrbmIOXmf+5YbL99/S49Y3j46k/T16b9ml8bm9lP6N9US5iQ2yBK7gpui1pg0V/WMcXdkfKbTb7HXq9u+v4g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz", + "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.11.tgz", + "integrity": "sha512-GMM8gGmqI7guS/llMFk1bJDkKfn3v3C4KHK9Yg1ey5qcHcOlKb0QvcMrgzvxo+T03/4szNh5lghY+fEC98Kq9g==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.11", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.15.tgz", + "integrity": "sha512-VbbC3PGjBdE0wAWDdHM9G8Gm977pnYI0XpqMd6LrKISj8/DJXEsWqgRuTYaNE9Bv0JGhTZUzHDlMk18IpOuoqw==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-environment-visitor": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.9", + "@babel/helper-split-export-declaration": "^7.22.6", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz", + "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/template": "^7.22.5" + }, + 
"engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.0.tgz", + "integrity": "sha512-vaMdgNXFkYrB+8lbgniSYWHsgqK5gjaMNcc84bMIOMRLH0L9AqYq3hwMdvnyqj1OPqea8UtjPEuS/DCenah1wg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz", + "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz", + "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.11.tgz", + "integrity": "sha512-g/21plo58sfteWjaO0ZNVb+uEOkJNjAaHhbejrnBmu011l/eNDScmkbjCC3l4FKb10ViaGU4aOkFznSu2zRHgA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz", + "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==", + "dev": true, + "dependencies": { + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.11.tgz", + "integrity": "sha512-xa7aad7q7OiT8oNZ1mU7NrISjlSkVdMbNxn9IuLZyL9AJEhs1Apba3I+u5riX1dIkdptP5EKDG5XDPByWxtehw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.22.15", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.15.tgz", + "integrity": "sha512-me6VGeHsx30+xh9fbDLLPi0J1HzmeIIyenoOQHuw2D4m2SAU3NrspX5XxJLBpqn5yrLzrlw2Iy3RA//Bx27iOA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz", + "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.5", + "@babel/helper-function-name": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.11.tgz", + "integrity": "sha512-CxT5tCqpA9/jXFlme9xIBCc5RPtdDq3JpkkhgHQqtDdiTnTI0jtZ0QzXhr5DILeYifDPp2wvY2ad+7+hLMW5Pw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-json-strings": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz", + "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.11.tgz", + "integrity": "sha512-qQwRTP4+6xFCDV5k7gZBF3C31K34ut0tbEcTKxlX/0KXxm9GLcO14p570aWxFvVzx6QAfPgq7gaeIHXJC8LswQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz", + "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.0.tgz", + "integrity": "sha512-xWT5gefv2HGSm4QHtgc1sYPbseOyf+FFDo2JbpE25GWl5BqTGO9IMwTYJRoIdjsF85GE+VegHxSCUt5EvoYTAw==", + "dev": true, + "dependencies": { + 
"@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.0.tgz", + "integrity": "sha512-32Xzss14/UVc7k9g775yMIvkVK8xwKE0DPdP5JTapr3+Z9w4tzeOuLNY6BXDQR6BdnzIlXnCGAzsk/ICHBLVWQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-simple-access": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.0.tgz", + "integrity": "sha512-qBej6ctXZD2f+DhlOC9yO47yEYgUh5CZNz/aBoH4j/3NOlRfJXJbY7xDQCqQVf9KbrqGzIWER1f23doHGrIHFg==", + "dev": true, + "dependencies": { + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.23.0", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz", + "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==", + "dev": true, + "dependencies": { + "@babel/helper-module-transforms": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz", + "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.11.tgz", + "integrity": "sha512-YZWOw4HxXrotb5xsjMJUDlLgcDXSfO9eCmdl1bgW4+/lAGdkjaEvOnQ4p5WKKdUgSzO39dgPl0pTnfxm0OAXcg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + 
"@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.11.tgz", + "integrity": "sha512-3dzU4QGPsILdJbASKhF/V2TVP+gJya1PsueQCxIPCEcerqF21oEcrob4mzjsp2Py/1nLfF5m+xYNMDpmA8vffg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.15.tgz", + "integrity": "sha512-fEB+I1+gAmfAyxZcX1+ZUwLeAuuf8VIg67CTznZE0MqVFumWkh8xWtn58I4dxdVf080wn7gzWoF8vndOViJe9Q==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.9", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz", + "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-replace-supers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.11.tgz", + "integrity": "sha512-rli0WxesXUeCJnMYhzAglEjLWVDF6ahb45HuprcmQuLidBJFWjNnOzssk2kuc6e33FlLaiZhG/kUIzUMWdBKaQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.0.tgz", + "integrity": "sha512-sBBGXbLJjxTzLBF5rFWaikMnOGOk/BmK6vVByIdEggZ7Vn6CvWXZyRkkLFK6WE0IF8jSliyOkUN6SScFgzCM0g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.15.tgz", + "integrity": "sha512-hjk7qKIqhyzhhUvRT683TYQOFa/4cQKwQy7ALvTpODswN40MljzNDa0YldevS6tGbxwaEKVn502JmY0dP7qEtQ==", + "dev": 
true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz", + "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.22.11", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.11.tgz", + "integrity": "sha512-sSCbqZDBKHetvjSwpyWzhuHkmW5RummxJBVbYLkGkaiTOWGxml7SXt0iWa03bzxFIx7wOj3g/ILRd0RcJKBeSQ==", + "dev": true, + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.22.11", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz", + "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.22.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.10.tgz", + "integrity": "sha512-F28b1mDt8KcT5bUyJc/U9nwzw6cV+UmTeRlXYIl2TNqMMJif0Jeey9/RQ3C4NOd2zp0/TRsDns9ttj2L523rsw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz", + "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz", + "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-spread": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz", + "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz", + "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz", + "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz", + "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.22.10", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.10.tgz", + "integrity": "sha512-lRfaRKGZCBqDlRU3UIFovdp9c9mEvlylmpod0/OatICsSfuQ9YFthRo1tpTkGsklEefZdqlEFdY4A2dwTb6ohg==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz", + "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz", + "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==", + "dev": true, + "dependencies": { + 
"@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz", + "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.20.tgz", + "integrity": "sha512-11MY04gGC4kSzlPHRfvVkNAZhUxOvm7DCJ37hPDnUENwe06npjIRAfInEMTGSb4LZK5ZgDFkv5hw0lGebHeTyg==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.20", + "@babel/helper-compilation-targets": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/helper-validator-option": "^7.22.15", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.15", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.15", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": "^7.22.5", + "@babel/plugin-syntax-import-attributes": "^7.22.5", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.22.5", + "@babel/plugin-transform-async-generator-functions": "^7.22.15", + "@babel/plugin-transform-async-to-generator": "^7.22.5", + "@babel/plugin-transform-block-scoped-functions": "^7.22.5", + "@babel/plugin-transform-block-scoping": "^7.22.15", + "@babel/plugin-transform-class-properties": "^7.22.5", + "@babel/plugin-transform-class-static-block": "^7.22.11", + "@babel/plugin-transform-classes": "^7.22.15", + "@babel/plugin-transform-computed-properties": "^7.22.5", + "@babel/plugin-transform-destructuring": "^7.22.15", + "@babel/plugin-transform-dotall-regex": "^7.22.5", + "@babel/plugin-transform-duplicate-keys": "^7.22.5", + "@babel/plugin-transform-dynamic-import": "^7.22.11", + "@babel/plugin-transform-exponentiation-operator": "^7.22.5", + "@babel/plugin-transform-export-namespace-from": "^7.22.11", + "@babel/plugin-transform-for-of": "^7.22.15", + "@babel/plugin-transform-function-name": "^7.22.5", + "@babel/plugin-transform-json-strings": 
"^7.22.11", + "@babel/plugin-transform-literals": "^7.22.5", + "@babel/plugin-transform-logical-assignment-operators": "^7.22.11", + "@babel/plugin-transform-member-expression-literals": "^7.22.5", + "@babel/plugin-transform-modules-amd": "^7.22.5", + "@babel/plugin-transform-modules-commonjs": "^7.22.15", + "@babel/plugin-transform-modules-systemjs": "^7.22.11", + "@babel/plugin-transform-modules-umd": "^7.22.5", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.22.5", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.11", + "@babel/plugin-transform-numeric-separator": "^7.22.11", + "@babel/plugin-transform-object-rest-spread": "^7.22.15", + "@babel/plugin-transform-object-super": "^7.22.5", + "@babel/plugin-transform-optional-catch-binding": "^7.22.11", + "@babel/plugin-transform-optional-chaining": "^7.22.15", + "@babel/plugin-transform-parameters": "^7.22.15", + "@babel/plugin-transform-private-methods": "^7.22.5", + "@babel/plugin-transform-private-property-in-object": "^7.22.11", + "@babel/plugin-transform-property-literals": "^7.22.5", + "@babel/plugin-transform-regenerator": "^7.22.10", + "@babel/plugin-transform-reserved-words": "^7.22.5", + "@babel/plugin-transform-shorthand-properties": "^7.22.5", + "@babel/plugin-transform-spread": "^7.22.5", + "@babel/plugin-transform-sticky-regex": "^7.22.5", + "@babel/plugin-transform-template-literals": "^7.22.5", + "@babel/plugin-transform-typeof-symbol": "^7.22.5", + "@babel/plugin-transform-unicode-escapes": "^7.22.10", + "@babel/plugin-transform-unicode-property-regex": "^7.22.5", + "@babel/plugin-transform-unicode-regex": "^7.22.5", + "@babel/plugin-transform-unicode-sets-regex": "^7.22.5", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "@babel/types": "^7.22.19", + "babel-plugin-polyfill-corejs2": "^0.4.5", + "babel-plugin-polyfill-corejs3": "^0.8.3", + "babel-plugin-polyfill-regenerator": "^0.5.2", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==", + "dev": true + }, + "node_modules/@babel/runtime": { + "version": "7.23.1", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.23.1.tgz", + "integrity": "sha512-hC2v6p8ZSI/W0HUzh3V8C5g+NwSKzKPtJwSpTjwl0o297GP9+ZLQSkdvHz46CM3LqyoXxq+5G9komY+eSqSO0g==", + "dev": true, + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", + "integrity": 
"sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/parser": "^7.22.15", + "@babel/types": "^7.22.15" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", + "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.22.13", + "@babel/generator": "^7.23.0", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-split-export-declaration": "^7.22.6", + "@babel/parser": "^7.23.0", + "@babel/types": "^7.23.0", + "debug": "^4.1.0", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.23.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", + "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "dev": true, + "dependencies": { + "@babel/helper-string-parser": "^7.22.5", + "@babel/helper-validator-identifier": "^7.22.20", + "to-fast-properties": "^2.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true + }, + "node_modules/@istanbuljs/load-nyc-config": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz", + "integrity": "sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==", + "dev": true, + "dependencies": { + "camelcase": "^5.3.1", + "find-up": "^4.1.0", + "get-package-type": "^0.1.0", + "js-yaml": "^3.13.1", + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@jest/console": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/console/-/console-29.7.0.tgz", + "integrity": "sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/core": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/core/-/core-29.7.0.tgz", + "integrity": "sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/reporters": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "ci-info": 
"^3.2.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-changed-files": "^29.7.0", + "jest-config": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-resolve-dependencies": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "jest-watcher": "^29.7.0", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/environment": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/environment/-/environment-29.7.0.tgz", + "integrity": "sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==", + "dev": true, + "dependencies": { + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==", + "dev": true, + "dependencies": { + "expect": "^29.7.0", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/fake-timers": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/fake-timers/-/fake-timers-29.7.0.tgz", + "integrity": "sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@sinonjs/fake-timers": "^10.0.2", + "@types/node": "*", + "jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/globals": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/globals/-/globals-29.7.0.tgz", + "integrity": "sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/types": "^29.6.3", + "jest-mock": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/reporters": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/reporters/-/reporters-29.7.0.tgz", + "integrity": "sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==", + "dev": true, + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@jest/console": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + 
"@jridgewell/trace-mapping": "^0.3.18", + "@types/node": "*", + "chalk": "^4.0.0", + "collect-v8-coverage": "^1.0.0", + "exit": "^0.1.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "istanbul-lib-coverage": "^3.0.0", + "istanbul-lib-instrument": "^6.0.0", + "istanbul-lib-report": "^3.0.0", + "istanbul-lib-source-maps": "^4.0.0", + "istanbul-reports": "^3.1.3", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "slash": "^3.0.0", + "string-length": "^4.0.1", + "strip-ansi": "^6.0.0", + "v8-to-istanbul": "^9.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dev": true, + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/source-map": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/source-map/-/source-map-29.6.3.tgz", + "integrity": "sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.18", + "callsites": "^3.0.0", + "graceful-fs": "^4.2.9" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-result": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-result/-/test-result-29.7.0.tgz", + "integrity": "sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "collect-v8-coverage": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/test-sequencer": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz", + "integrity": "sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/transform": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/transform/-/transform-29.7.0.tgz", + "integrity": "sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/types": "^29.6.3", + "@jridgewell/trace-mapping": "^0.3.18", + "babel-plugin-istanbul": "^6.1.1", + "chalk": "^4.0.0", + "convert-source-map": "^2.0.0", + "fast-json-stable-stringify": "^2.1.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "micromatch": "^4.0.4", + "pirates": "^4.0.4", + "slash": "^3.0.0", + "write-file-atomic": "^4.0.2" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/types": { + "version": "29.6.3", + "resolved": 
"https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "@types/istanbul-lib-coverage": "^2.0.0", + "@types/istanbul-reports": "^3.0.0", + "@types/node": "*", + "@types/yargs": "^17.0.8", + "chalk": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", + "integrity": "sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==", + "dev": true, + "dependencies": { + "@jridgewell/set-array": "^1.0.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.9" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz", + "integrity": "sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", + "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "dev": true, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", + "dev": true + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.19", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz", + "integrity": "sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==", + "dev": true, + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==", + "dev": true + }, + "node_modules/@sinonjs/commons": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@sinonjs/commons/-/commons-3.0.0.tgz", + "integrity": "sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==", + "dev": true, + "dependencies": { + "type-detect": "4.0.8" + } + }, + "node_modules/@sinonjs/fake-timers": { + "version": "10.3.0", + "resolved": "https://registry.npmjs.org/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz", + "integrity": "sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==", + "dev": true, + "dependencies": { + "@sinonjs/commons": "^3.0.0" + } + }, + "node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "engines": { + "node": ">= 
10" + } + }, + "node_modules/@types/babel__core": { + "version": "7.20.2", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.2.tgz", + "integrity": "sha512-pNpr1T1xLUc2l3xJKuPtsEky3ybxN3m4fJkknfIpTCTfIZCDW57oAg+EfCgIIp2rvCe0Wn++/FfodDS4YXxBwA==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.6.5", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.5.tgz", + "integrity": "sha512-h9yIuWbJKdOPLJTbmSpPzkF67e659PbQDba7ifWm5BJ8xTv+sDmS7rFmywkWOvXedGTivCdeGSIIX8WLcRTz8w==", + "dev": true, + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.2.tgz", + "integrity": "sha512-/AVzPICMhMOMYoSx9MoKpGDKdBRsIXMNByh1PXSZoa+v6ZoLa8xxtsT/uLQ/NJm0XVAWl/BvId4MlDeXJaeIZQ==", + "dev": true, + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.20.2", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.2.tgz", + "integrity": "sha512-ojlGK1Hsfce93J0+kn3H5R73elidKUaZonirN33GSmgTUMpzI/MIFfSpF3haANe3G1bEBS9/9/QEqwTzwqFsKw==", + "dev": true, + "dependencies": { + "@babel/types": "^7.20.7" + } + }, + "node_modules/@types/graceful-fs": { + "version": "4.1.7", + "resolved": "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.7.tgz", + "integrity": "sha512-MhzcwU8aUygZroVwL2jeYk6JisJrPl/oov/gsgGCue9mkgl9wjGbzReYQClxiUgFDnib9FuHqTndccKeZKxTRw==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", + "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==", + "dev": true + }, + "node_modules/@types/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-gPQuzaPR5h/djlAv2apEG1HVOyj1IUs7GpfMZixU0/0KXT3pm64ylHuMUI1/Akh+sq/iikxg6Z2j+fcMDXaaTQ==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-coverage": "*" + } + }, + "node_modules/@types/istanbul-reports": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.2.tgz", + "integrity": "sha512-kv43F9eb3Lhj+lr/Hn6OcLCs/sSM8bt+fIaP11rCYngfV6NVjzWXJ17owQtDQTL9tQ8WSLUrGsSJ6rJz0F1w1A==", + "dev": true, + "dependencies": { + "@types/istanbul-lib-report": "*" + } + }, + "node_modules/@types/jest": { + "version": "29.5.5", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.5.tgz", + "integrity": "sha512-ebylz2hnsWR9mYvmBFbXJXr+33UPc4+ZdxyDXh5w0FlPBTfCVN3wPL+kuOiQt3xvrK419v7XWeAs+AeOksafXg==", + "dev": true, + "dependencies": { + "expect": "^29.0.0", + "pretty-format": "^29.0.0" + } + }, + "node_modules/@types/jsdom": { + "version": "20.0.1", + "resolved": "https://registry.npmjs.org/@types/jsdom/-/jsdom-20.0.1.tgz", + "integrity": "sha512-d0r18sZPmMQr1eG35u12FZfhIXNrnsPU/g5wvRKCUf/tOGilKKwYMYGqh33BNR6ba+2gkHw1EUiHoN3mn7E5IQ==", + "dev": true, + "dependencies": { + "@types/node": "*", 
+ "@types/tough-cookie": "*", + "parse5": "^7.0.0" + } + }, + "node_modules/@types/node": { + "version": "20.8.3", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.3.tgz", + "integrity": "sha512-jxiZQFpb+NlH5kjW49vXxvxTjeeqlbsnTAdBTKpzEdPs9itay7MscYXz3Fo9VYFEsfQ6LJFitHad3faerLAjCw==", + "dev": true + }, + "node_modules/@types/stack-utils": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz", + "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==", + "dev": true + }, + "node_modules/@types/tough-cookie": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.3.tgz", + "integrity": "sha512-THo502dA5PzG/sfQH+42Lw3fvmYkceefOspdCwpHRul8ik2Jv1K8I5OZz1AT3/rs46kwgMCe9bSBmDLYkkOMGg==", + "dev": true + }, + "node_modules/@types/yargs": { + "version": "17.0.28", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.28.tgz", + "integrity": "sha512-N3e3fkS86hNhtk6BEnc0rj3zcehaxx8QWhCROJkqpl5Zaoi7nAic3jH8q94jVD3zu5LGk+PUB6KAiDmimYOEQw==", + "dev": true, + "dependencies": { + "@types/yargs-parser": "*" + } + }, + "node_modules/@types/yargs-parser": { + "version": "21.0.1", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.1.tgz", + "integrity": "sha512-axdPBuLuEJt0c4yI5OZssC19K2Mq1uKdrfZBzuxLvaztgqUtFYZUNw7lETExPYJR9jdEoIg4mb7RQKRQzOkeGQ==", + "dev": true + }, + "node_modules/abab": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", + "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", + "dev": true + }, + "node_modules/acorn": { + "version": "8.10.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", + "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "dev": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-globals": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-7.0.1.tgz", + "integrity": "sha512-umOSDSDrfHbTNPuNpC2NSnnA3LUrqpevPb4T9jRx4MagXNS0rs+gwiTcAvqCRmsD6utzsrzNt+ebm00SNWiC3Q==", + "dev": true, + "dependencies": { + "acorn": "^8.1.0", + "acorn-walk": "^8.0.2" + } + }, + "node_modules/acorn-walk": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", + "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + 
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "dev": true + }, + "node_modules/babel-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/babel-jest/-/babel-jest-29.7.0.tgz", + "integrity": "sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==", + "dev": true, + "dependencies": { + "@jest/transform": "^29.7.0", + "@types/babel__core": "^7.1.14", + "babel-plugin-istanbul": "^6.1.1", + "babel-preset-jest": "^29.6.3", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.8.0" + } + }, + "node_modules/babel-plugin-istanbul": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz", + "integrity": "sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@istanbuljs/load-nyc-config": "^1.0.0", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-instrument": "^5.0.4", + "test-exclude": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-istanbul/node_modules/istanbul-lib-instrument": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz", + "integrity": "sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^6.3.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/babel-plugin-jest-hoist": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz", + "integrity": 
"sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==", + "dev": true, + "dependencies": { + "@babel/template": "^7.3.3", + "@babel/types": "^7.3.3", + "@types/babel__core": "^7.1.14", + "@types/babel__traverse": "^7.0.6" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.5", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.5.tgz", + "integrity": "sha512-19hwUH5FKl49JEsvyTcoHakh6BE0wgXLLptIyKZ3PijHc/Ci521wygORCUCCred+E/twuqRyAkE02BAWPmsHOg==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.4.2", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.8.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.4.tgz", + "integrity": "sha512-9l//BZZsPR+5XjyJMPtZSK4jv0BsTO1zDac2GC6ygx9WLGlcsnRd1Co0B2zT5fF5Ic6BZy+9m3HNZ3QcOeDKfg==", + "dev": true, + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.2", + "core-js-compat": "^3.32.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.2.tgz", + "integrity": "sha512-tAlOptU0Xj34V1Y2PNTL4Y0FOJMDB6bZmoW39FeCQIhigGLkqu3Fj6uiXpxIf6Ij274ENdYx64y6Au+ZKlb1IA==", + "dev": true, + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.4.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-preset-current-node-syntax": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", + "integrity": "sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ==", + "dev": true, + "dependencies": { + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-bigint": "^7.8.3", + "@babel/plugin-syntax-class-properties": "^7.8.3", + "@babel/plugin-syntax-import-meta": "^7.8.3", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-top-level-await": "^7.8.3" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/babel-preset-jest": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz", + "integrity": "sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==", + "dev": true, + "dependencies": { + "babel-plugin-jest-hoist": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", + "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "dev": true, + "dependencies": { + "fill-range": "^7.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz", + "integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "caniuse-lite": "^1.0.30001541", + "electron-to-chromium": "^1.4.535", + "node-releases": "^2.0.13", + "update-browserslist-db": "^1.0.13" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bser": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/bser/-/bser-2.1.1.tgz", + "integrity": "sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==", + "dev": true, + "dependencies": { + "node-int64": "^0.4.0" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", + "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001546", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001546.tgz", + "integrity": "sha512-zvtSJwuQFpewSyRrI3AsftF6rM0X80mZkChIt1spBGEvRglCrjTniXvinc8JKRoqTwXAgvqTImaN9igfSMtUBw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/char-regex": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/char-regex/-/char-regex-1.0.2.tgz", + "integrity": "sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/cjs-module-lexer": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz", + "integrity": "sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ==", + "dev": true + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==", + "dev": true, + "engines": { + "iojs": ">= 1.0.0", + "node": ">= 0.12.0" + } + }, + "node_modules/collect-v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz", + "integrity": "sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==", + "dev": true + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dev": true, + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": 
"sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true + }, + "node_modules/core-js-compat": { + "version": "3.33.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.33.0.tgz", + "integrity": "sha512-0w4LcLXsVEuNkIqwjjf9rjCoPhK8uqA4tMRh4Ge26vfLtUutshn+aRJU21I9LCJlh2QQHfisNToLjw1XEJLTWw==", + "dev": true, + "dependencies": { + "browserslist": "^4.22.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/create-jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/create-jest/-/create-jest-29.7.0.tgz", + "integrity": "sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "exit": "^0.1.2", + "graceful-fs": "^4.2.9", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "prompts": "^2.0.1" + }, + "bin": { + "create-jest": "bin/create-jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssom": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.5.0.tgz", + "integrity": "sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw==", + "dev": true + }, + "node_modules/cssstyle": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-2.3.0.tgz", + "integrity": "sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A==", + "dev": true, + "dependencies": { + "cssom": "~0.3.6" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cssstyle/node_modules/cssom": { + "version": "0.3.8", + "resolved": "https://registry.npmjs.org/cssom/-/cssom-0.3.8.tgz", + "integrity": "sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg==", + "dev": true + }, + "node_modules/data-urls": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz", + "integrity": "sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ==", + "dev": true, + "dependencies": { + "abab": "^2.0.6", + "whatwg-mimetype": "^3.0.0", + "whatwg-url": "^11.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dev": true, + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js": { + "version": "10.4.3", + "resolved": 
"https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz", + "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==", + "dev": true + }, + "node_modules/dedent": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.1.tgz", + "integrity": "sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==", + "dev": true, + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "dev": true, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-newline": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-3.1.0.tgz", + "integrity": "sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/diff-sequences": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/domexception": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", + "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", + "dev": true, + "dependencies": { + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.4.544", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.544.tgz", + "integrity": "sha512-54z7squS1FyFRSUqq/knOFSptjjogLZXbKcYk3B0qkE1KZzvqASwRZnY2KzZQJqIYLVD38XZeoiMRflYSwyO4w==", + "dev": true + }, + "node_modules/emittery": { + "version": "0.13.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-0.13.1.tgz", + "integrity": "sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "dev": true, + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + 
"node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/escalade": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/escodegen": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", + "dev": true, + "dependencies": { + "esprima": "^4.0.1", + "estraverse": "^5.2.0", + "esutils": "^2.0.2" + }, + "bin": { + "escodegen": "bin/escodegen.js", + "esgenerate": "bin/esgenerate.js" + }, + "engines": { + "node": ">=6.0" + }, + "optionalDependencies": { + "source-map": "~0.6.1" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exit": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz", + "integrity": "sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/expect": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": 
"sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", + "dev": true, + "dependencies": { + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true + }, + "node_modules/fb-watchman": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/fb-watchman/-/fb-watchman-2.0.2.tgz", + "integrity": "sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==", + "dev": true, + "dependencies": { + "bser": "2.1.1" + } + }, + "node_modules/fill-range": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", + "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "dev": true, + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dev": true, + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-package-type": { + "version": "0.1.0", + "resolved": 
"https://registry.npmjs.org/get-package-type/-/get-package-type-0.1.0.tgz", + "integrity": "sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==", + "dev": true, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dev": true, + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true + }, + "node_modules/has": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.4.tgz", + "integrity": "sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==", + "dev": true, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/html-encoding-sniffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", + "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", + "dev": true, + "dependencies": { + "whatwg-encoding": "^2.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true + }, + "node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": 
"sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/import-local": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/import-local/-/import-local-3.1.0.tgz", + "integrity": "sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg==", + "dev": true, + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dev": true, + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-core-module": { + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.0.tgz", + "integrity": "sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==", + "dev": true, + "dependencies": { + "has": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-generator-fn/-/is-generator-fn-2.1.0.tgz", + "integrity": "sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==", + 
"dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz", + "integrity": "sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-instrument": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.1.tgz", + "integrity": "sha512-EAMEJBsYuyyztxMxW3g7ugGPkrZsV57v0Hmv3mm1uQsmB+QnZuepg731CRaIgeUVSdmsTngOkSnauNF8p7FIhA==", + "dev": true, + "dependencies": { + "@babel/core": "^7.12.3", + "@babel/parser": "^7.14.7", + "@istanbuljs/schema": "^0.1.2", + "istanbul-lib-coverage": "^3.2.0", + "semver": "^7.5.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-instrument/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "dependencies": { + 
"istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz", + "integrity": "sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==", + "dev": true, + "dependencies": { + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0", + "source-map": "^0.6.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.6.tgz", + "integrity": "sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==", + "dev": true, + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jest": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest/-/jest-29.7.0.tgz", + "integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/types": "^29.6.3", + "import-local": "^3.0.2", + "jest-cli": "^29.7.0" + }, + "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-changed-files": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-changed-files/-/jest-changed-files-29.7.0.tgz", + "integrity": "sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==", + "dev": true, + "dependencies": { + "execa": "^5.0.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-circus": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-circus/-/jest-circus-29.7.0.tgz", + "integrity": "sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/expect": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "co": "^4.6.0", + "dedent": "^1.0.0", + "is-generator-fn": "^2.0.0", + "jest-each": "^29.7.0", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "p-limit": "^3.1.0", + "pretty-format": "^29.7.0", + "pure-rand": "^6.0.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-cli": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-cli/-/jest-cli-29.7.0.tgz", + "integrity": "sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==", + "dev": true, + "dependencies": { + "@jest/core": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "create-jest": "^29.7.0", + "exit": "^0.1.2", + "import-local": "^3.0.2", + "jest-config": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "yargs": "^17.3.1" + }, 
+ "bin": { + "jest": "bin/jest.js" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "node-notifier": "^8.0.1 || ^9.0.0 || ^10.0.0" + }, + "peerDependenciesMeta": { + "node-notifier": { + "optional": true + } + } + }, + "node_modules/jest-config": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-config/-/jest-config-29.7.0.tgz", + "integrity": "sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@jest/test-sequencer": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-jest": "^29.7.0", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "deepmerge": "^4.2.2", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-circus": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-runner": "^29.7.0", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "micromatch": "^4.0.4", + "parse-json": "^5.2.0", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "@types/node": "*", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/jest-diff": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-docblock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-docblock/-/jest-docblock-29.7.0.tgz", + "integrity": "sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==", + "dev": true, + "dependencies": { + "detect-newline": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-each": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-each/-/jest-each-29.7.0.tgz", + "integrity": "sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "jest-util": "^29.7.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-environment-jsdom": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-jsdom/-/jest-environment-jsdom-29.7.0.tgz", + "integrity": "sha512-k9iQbsf9OyOfdzWH8HDmrRT0gSIcX+FLNW7IQq94tFX0gynPwqDTW0Ho6iMVNjGz/nb+l/vW3dWM2bbLLpkbXA==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/jsdom": "^20.0.0", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0", + "jsdom": "^20.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "peerDependencies": { + "canvas": "^2.5.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jest-environment-node": { + 
"version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-environment-node/-/jest-environment-node-29.7.0.tgz", + "integrity": "sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-mock": "^29.7.0", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-get-type": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-haste-map": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-haste-map/-/jest-haste-map-29.7.0.tgz", + "integrity": "sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/graceful-fs": "^4.1.3", + "@types/node": "*", + "anymatch": "^3.0.3", + "fb-watchman": "^2.0.0", + "graceful-fs": "^4.2.9", + "jest-regex-util": "^29.6.3", + "jest-util": "^29.7.0", + "jest-worker": "^29.7.0", + "micromatch": "^4.0.4", + "walker": "^1.0.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + }, + "optionalDependencies": { + "fsevents": "^2.3.2" + } + }, + "node_modules/jest-leak-detector": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz", + "integrity": "sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==", + "dev": true, + "dependencies": { + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-matcher-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-message-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.12.13", + "@jest/types": "^29.6.3", + "@types/stack-utils": "^2.0.0", + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "micromatch": "^4.0.4", + "pretty-format": "^29.7.0", + "slash": "^3.0.0", + "stack-utils": "^2.0.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-mock": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-mock/-/jest-mock-29.7.0.tgz", + "integrity": "sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "jest-util": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + 
"node_modules/jest-pnp-resolver": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz", + "integrity": "sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==", + "dev": true, + "engines": { + "node": ">=6" + }, + "peerDependencies": { + "jest-resolve": "*" + }, + "peerDependenciesMeta": { + "jest-resolve": { + "optional": true + } + } + }, + "node_modules/jest-regex-util": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-regex-util/-/jest-regex-util-29.6.3.tgz", + "integrity": "sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==", + "dev": true, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve/-/jest-resolve-29.7.0.tgz", + "integrity": "sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==", + "dev": true, + "dependencies": { + "chalk": "^4.0.0", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + "jest-pnp-resolver": "^1.2.2", + "jest-util": "^29.7.0", + "jest-validate": "^29.7.0", + "resolve": "^1.20.0", + "resolve.exports": "^2.0.0", + "slash": "^3.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-resolve-dependencies": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz", + "integrity": "sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==", + "dev": true, + "dependencies": { + "jest-regex-util": "^29.6.3", + "jest-snapshot": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runner": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runner/-/jest-runner-29.7.0.tgz", + "integrity": "sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==", + "dev": true, + "dependencies": { + "@jest/console": "^29.7.0", + "@jest/environment": "^29.7.0", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "graceful-fs": "^4.2.9", + "jest-docblock": "^29.7.0", + "jest-environment-node": "^29.7.0", + "jest-haste-map": "^29.7.0", + "jest-leak-detector": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-resolve": "^29.7.0", + "jest-runtime": "^29.7.0", + "jest-util": "^29.7.0", + "jest-watcher": "^29.7.0", + "jest-worker": "^29.7.0", + "p-limit": "^3.1.0", + "source-map-support": "0.5.13" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-runtime": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-runtime/-/jest-runtime-29.7.0.tgz", + "integrity": "sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==", + "dev": true, + "dependencies": { + "@jest/environment": "^29.7.0", + "@jest/fake-timers": "^29.7.0", + "@jest/globals": "^29.7.0", + "@jest/source-map": "^29.6.3", + "@jest/test-result": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "cjs-module-lexer": "^1.0.0", + "collect-v8-coverage": "^1.0.0", + "glob": "^7.1.3", + "graceful-fs": "^4.2.9", + "jest-haste-map": "^29.7.0", + 
"jest-message-util": "^29.7.0", + "jest-mock": "^29.7.0", + "jest-regex-util": "^29.6.3", + "jest-resolve": "^29.7.0", + "jest-snapshot": "^29.7.0", + "jest-util": "^29.7.0", + "slash": "^3.0.0", + "strip-bom": "^4.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-snapshot/-/jest-snapshot-29.7.0.tgz", + "integrity": "sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==", + "dev": true, + "dependencies": { + "@babel/core": "^7.11.6", + "@babel/generator": "^7.7.2", + "@babel/plugin-syntax-jsx": "^7.7.2", + "@babel/plugin-syntax-typescript": "^7.7.2", + "@babel/types": "^7.3.3", + "@jest/expect-utils": "^29.7.0", + "@jest/transform": "^29.7.0", + "@jest/types": "^29.6.3", + "babel-preset-current-node-syntax": "^1.0.0", + "chalk": "^4.0.0", + "expect": "^29.7.0", + "graceful-fs": "^4.2.9", + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0", + "natural-compare": "^1.4.0", + "pretty-format": "^29.7.0", + "semver": "^7.5.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-snapshot/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-snapshot/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-snapshot/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/jest-util": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "@types/node": "*", + "chalk": "^4.0.0", + "ci-info": "^3.2.0", + "graceful-fs": "^4.2.9", + "picomatch": "^2.2.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-validate/-/jest-validate-29.7.0.tgz", + "integrity": "sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==", + "dev": true, + "dependencies": { + "@jest/types": "^29.6.3", + "camelcase": "^6.2.0", + "chalk": "^4.0.0", + "jest-get-type": "^29.6.3", + "leven": "^3.1.0", + "pretty-format": "^29.7.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-validate/node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": 
"sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/jest-watcher": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-watcher/-/jest-watcher-29.7.0.tgz", + "integrity": "sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==", + "dev": true, + "dependencies": { + "@jest/test-result": "^29.7.0", + "@jest/types": "^29.6.3", + "@types/node": "*", + "ansi-escapes": "^4.2.1", + "chalk": "^4.0.0", + "emittery": "^0.13.1", + "jest-util": "^29.7.0", + "string-length": "^4.0.1" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.7.0.tgz", + "integrity": "sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==", + "dev": true, + "dependencies": { + "@types/node": "*", + "jest-util": "^29.7.0", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/jest-worker/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true + }, + "node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", + "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsdom": { + "version": "20.0.3", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-20.0.3.tgz", + "integrity": "sha512-SYhBvTh89tTfCD/CRdSOm13mOBa42iTaTyfyEWBdKcGdPxPtLFBXuHR8XHb33YNYaP+lLbmSvBTsnoesCNJEsQ==", + "dev": true, + "dependencies": { + "abab": "^2.0.6", + "acorn": "^8.8.1", + "acorn-globals": "^7.0.0", + "cssom": "^0.5.0", + "cssstyle": "^2.3.0", + "data-urls": "^3.0.2", + "decimal.js": "^10.4.2", + "domexception": "^4.0.0", + "escodegen": "^2.0.0", + "form-data": "^4.0.0", + "html-encoding-sniffer": "^3.0.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.1", + "is-potential-custom-element-name": "^1.0.1", + "nwsapi": "^2.2.2", + "parse5": "^7.1.1", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^4.1.2", + "w3c-xmlserializer": "^4.0.0", + "webidl-conversions": "^7.0.0", + "whatwg-encoding": "^2.0.0", + "whatwg-mimetype": "^3.0.0", + "whatwg-url": "^11.0.0", + "ws": "^8.11.0", + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": ">=14" + }, + "peerDependencies": { + "canvas": "^2.5.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jsesc": { + 
"version": "2.5.2", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", + "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true + }, + "node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "dev": true + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/make-dir/node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", + "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + 
"node_modules/make-dir/node_modules/semver": { + "version": "7.5.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", + "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "dev": true, + "dependencies": { + "lru-cache": "^6.0.0" + }, + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-dir/node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true + }, + "node_modules/makeerror": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/makeerror/-/makeerror-1.0.12.tgz", + "integrity": "sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==", + "dev": true, + "dependencies": { + "tmpl": "1.0.5" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true + }, + "node_modules/micromatch": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", + "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", + "dev": true, + "dependencies": { + "braces": "^3.0.2", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "dev": true, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dev": true, + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true + }, + "node_modules/node-int64": { + "version": "0.4.0", + "resolved": 
"https://registry.npmjs.org/node-int64/-/node-int64-0.4.0.tgz", + "integrity": "sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==", + "dev": true + }, + "node_modules/node-releases": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz", + "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==", + "dev": true + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/nwsapi": { + "version": "2.2.7", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.7.tgz", + "integrity": "sha512-ub5E4+FBPKwAZx0UwIQOjYWGHTEq5sPqHQNRN8Z9e4A7u3Tj1weLJsL59yH9vmvqEtBHaOmT6cYQKIZOxp35FQ==", + "dev": true + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "engines": { + "node": ">=6" 
+ } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse5": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", + "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", + "dev": true, + "dependencies": { + "entities": "^4.4.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true + }, + "node_modules/picocolors": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", + "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==", + "dev": true + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "dev": true, + "engines": { + "node": ">= 6" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/pretty-format": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": 
"sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", + "dev": true, + "dependencies": { + "@jest/schemas": "^29.6.3", + "ansi-styles": "^5.0.0", + "react-is": "^18.0.0" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/pretty-format/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dev": true, + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/psl": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", + "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", + "dev": true + }, + "node_modules/punycode": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", + "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/pure-rand": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/pure-rand/-/pure-rand-6.0.4.tgz", + "integrity": "sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA==", + "dev": true, + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/dubzzz" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fast-check" + } + ] + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true + }, + "node_modules/react-is": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", + "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==", + "dev": true + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", + "dev": true + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": "sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", + "dev": true, + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", + "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==", + "dev": true + }, + 
"node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dev": true, + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", + "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", + "dev": true, + "dependencies": { + "@babel/regjsgen": "^0.8.0", + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.1.0", + "regjsparser": "^0.9.1", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regjsparser": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", + "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", + "dev": true, + "dependencies": { + "jsesc": "~0.5.0" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", + "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", + "dev": true, + "bin": { + "jsesc": "bin/jsesc" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true + }, + "node_modules/resolve": { + "version": "1.22.6", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.6.tgz", + "integrity": "sha512-njhxM7mV12JfufShqGy3Rz8j11RPdLy4xi15UurGJeoHLfJpVXKdh3ueuOqbYUcDZnffr6X739JBo5LzyahEsw==", + "dev": true, + "dependencies": { + "is-core-module": "^2.13.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve.exports": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/resolve.exports/-/resolve.exports-2.0.2.tgz", + "integrity": 
"sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "dev": true + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-support": { + "version": "0.5.13", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.13.tgz", + "integrity": "sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==", + "dev": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "dev": true + }, + "node_modules/stack-utils": { + "version": "2.0.6", + 
"resolved": "https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", + "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", + "dev": true, + "dependencies": { + "escape-string-regexp": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-length": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-4.0.2.tgz", + "integrity": "sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==", + "dev": true, + "dependencies": { + "char-regex": "^1.0.2", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-4.0.0.tgz", + "integrity": "sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==", + "dev": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true + }, + 
"node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tmpl": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==", + "dev": true + }, + "node_modules/to-fast-properties": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", + "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tough-cookie": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", + "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", + "dev": true, + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tr46": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-3.0.0.tgz", + "integrity": "sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA==", + "dev": true, + "dependencies": { + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/type-detect": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", + "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", + "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dev": true, + "dependencies": { + 
"unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", + "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.1.1", + "picocolors": "^1.0.0" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, + "node_modules/v8-to-istanbul": { + "version": "9.1.3", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.1.3.tgz", + "integrity": "sha512-9lDD+EVI2fjFsMWXc6dy5JJzBsVTcQ2fVkfBvncZ6xJWG9wtBhOldG+mHkSL0+V1K/xgZz0JDO5UT5hFwHUghg==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/w3c-xmlserializer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-4.0.0.tgz", + "integrity": "sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw==", + "dev": true, + "dependencies": { + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/walker": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz", + "integrity": "sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==", + "dev": true, + "dependencies": { + "makeerror": "1.0.12" + } + }, + "node_modules/webidl-conversions": { + "version": "7.0.0", + "resolved": 
"https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-encoding": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", + "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", + "dev": true, + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-mimetype": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz", + "integrity": "sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-url": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-11.0.0.tgz", + "integrity": "sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ==", + "dev": true, + "dependencies": { + "tr46": "^3.0.0", + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/ws": { + "version": "8.14.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz", + "integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==", + "dev": true, + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xml-name-validator": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", + "integrity": 
"sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/tests-ui/package.json b/tests-ui/package.json new file mode 100644 index 00000000000..e7b60ad8e75 --- /dev/null +++ b/tests-ui/package.json @@ -0,0 +1,30 @@ +{ + "name": "comfui-tests", + "version": "1.0.0", + "description": "UI tests", + "main": "index.js", + "scripts": { + "test": "jest", + "test:generate": "node setup.js" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/comfyanonymous/ComfyUI.git" + }, + "keywords": [ + "comfyui", + "test" + ], + "author": "comfyanonymous", + "license": "GPL-3.0", + "bugs": { + "url": "https://github.com/comfyanonymous/ComfyUI/issues" + }, + "homepage": "https://github.com/comfyanonymous/ComfyUI#readme", + "devDependencies": { + "@babel/preset-env": "^7.22.20", + "@types/jest": "^29.5.5", + "jest": "^29.7.0", + "jest-environment-jsdom": "^29.7.0" + } +} diff --git a/tests-ui/setup.js b/tests-ui/setup.js new file mode 100644 index 00000000000..0f368ab22f9 --- /dev/null +++ b/tests-ui/setup.js @@ -0,0 +1,87 @@ +const { spawn } = require("child_process"); +const { resolve } = require("path"); +const { existsSync, mkdirSync, writeFileSync } = require("fs"); +const http = require("http"); + +async function setup() { + // Wait up to 30s for it to start + let success = false; + let child; + for (let i = 0; i < 30; i++) { + try { + await new Promise((res, rej) => { + http + .get("http://127.0.0.1:8188/object_info", (resp) => { 
+ let data = ""; + resp.on("data", (chunk) => { + data += chunk; + }); + resp.on("end", () => { + // Modify the response data to add some checkpoints + const objectInfo = JSON.parse(data); + objectInfo.CheckpointLoaderSimple.input.required.ckpt_name[0] = ["model1.safetensors", "model2.ckpt"]; + + data = JSON.stringify(objectInfo, undefined, "\t"); + + const outDir = resolve("./data"); + if (!existsSync(outDir)) { + mkdirSync(outDir); + } + + const outPath = resolve(outDir, "object_info.json"); + console.log(`Writing ${Object.keys(objectInfo).length} nodes to ${outPath}`); + writeFileSync(outPath, data, { + encoding: "utf8", + }); + res(); + }); + }) + .on("error", rej); + }); + success = true; + break; + } catch (error) { + console.log(i + "/30", error); + if (i === 0) { + // Start the server on first iteration if it fails to connect + console.log("Starting ComfyUI server..."); + + let python = resolve("../../python_embeded/python.exe"); + let args; + let cwd; + if (existsSync(python)) { + args = ["-s", "ComfyUI/main.py"]; + cwd = "../.."; + } else { + python = "python"; + args = ["main.py"]; + cwd = ".."; + } + args.push("--cpu"); + console.log(python, ...args); + child = spawn(python, args, { cwd }); + child.on("error", (err) => { + console.log(`Server error (${err})`); + i = 30; + }); + child.on("exit", (code) => { + if (!success) { + console.log(`Server exited (${code})`); + i = 30; + } + }); + } + await new Promise((r) => { + setTimeout(r, 1000); + }); + } + } + + child?.kill(); + + if (!success) { + throw new Error("Waiting for server failed..."); + } +} + + setup(); \ No newline at end of file diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js new file mode 100644 index 00000000000..022e5492667 --- /dev/null +++ b/tests-ui/tests/widgetInputs.test.js @@ -0,0 +1,319 @@ +// @ts-check +/// + +const { start, makeNodeDef, checkBeforeAndAfterReload, assertNotNullOrUndefined } = require("../utils"); +const lg = require("../utils/litegraph"); + +/** + * @typedef { import("../utils/ezgraph") } Ez + * @typedef { ReturnType["ez"] } EzNodeFactory + */ + +/** + * @param { EzNodeFactory } ez + * @param { InstanceType } graph + * @param { InstanceType } input + * @param { string } widgetType + * @param { boolean } hasControlWidget + * @returns + */ +async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasControlWidget) { + // Connect to primitive and ensure its still connected after + let primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(input); + + await checkBeforeAndAfterReload(graph, async () => { + primitive = graph.find(primitive); + let { connections } = primitive.outputs[0]; + expect(connections).toHaveLength(1); + expect(connections[0].targetNode.id).toBe(input.node.node.id); + + // Ensure widget is correct type + const valueWidget = primitive.widgets.value; + expect(valueWidget.widget.type).toBe(widgetType); + + // Check if control_after_generate should be added + if (hasControlWidget) { + const controlWidget = primitive.widgets.control_after_generate; + expect(controlWidget.widget.type).toBe("combo"); + } + + // Ensure we dont have other widgets + expect(primitive.node.widgets).toHaveLength(1 + +!!hasControlWidget); + }); + + return primitive; +} + +describe("widget inputs", () => { + beforeEach(() => { + lg.setup(global); + }); + + afterEach(() => { + lg.teardown(global); + }); + + [ + { name: "int", type: "INT", widget: "number", control: true }, + { name: "float", type: "FLOAT", widget: "number", control: true }, + { 
name: "text", type: "STRING" }, + { + name: "customtext", + type: "STRING", + opt: { multiline: true }, + }, + { name: "toggle", type: "BOOLEAN" }, + { name: "combo", type: ["a", "b", "c"], control: true }, + ].forEach((c) => { + test(`widget conversion + primitive works on ${c.name}`, async () => { + const { ez, graph } = await start({ + mockNodeDefs: makeNodeDef("TestNode", { [c.name]: [c.type, c.opt ?? {}] }), + }); + + // Create test node and convert to input + const n = ez.TestNode(); + const w = n.widgets[c.name]; + w.convertToInput(); + expect(w.isConvertedToInput).toBeTruthy(); + const input = w.getConvertedInput(); + expect(input).toBeTruthy(); + + // @ts-ignore : input is valid here + await connectPrimitiveAndReload(ez, graph, input, c.widget ?? c.name, c.control); + }); + }); + + test("converted widget works after reload", async () => { + const { ez, graph } = await start(); + let n = ez.CheckpointLoaderSimple(); + + const inputCount = n.inputs.length; + + // Convert ckpt name to an input + n.widgets.ckpt_name.convertToInput(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeTruthy(); + expect(n.inputs.ckpt_name).toBeTruthy(); + expect(n.inputs.length).toEqual(inputCount + 1); + + // Convert back to widget and ensure input is removed + n.widgets.ckpt_name.convertToWidget(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeFalsy(); + expect(n.inputs.ckpt_name).toBeFalsy(); + expect(n.inputs.length).toEqual(inputCount); + + // Convert again and reload the graph to ensure it maintains state + n.widgets.ckpt_name.convertToInput(); + expect(n.inputs.length).toEqual(inputCount + 1); + + const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", true); + + // Disconnect & reconnect + primitive.outputs[0].connections[0].disconnect(); + let { connections } = primitive.outputs[0]; + expect(connections).toHaveLength(0); + + primitive.outputs[0].connectTo(n.inputs.ckpt_name); + ({ connections } = primitive.outputs[0]); + expect(connections).toHaveLength(1); + expect(connections[0].targetNode.id).toBe(n.node.id); + + // Convert back to widget and ensure input is removed + n.widgets.ckpt_name.convertToWidget(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeFalsy(); + expect(n.inputs.ckpt_name).toBeFalsy(); + expect(n.inputs.length).toEqual(inputCount); + }); + + test("converted widget works on clone", async () => { + const { graph, ez } = await start(); + let n = ez.CheckpointLoaderSimple(); + + // Convert the widget to an input + n.widgets.ckpt_name.convertToInput(); + expect(n.widgets.ckpt_name.isConvertedToInput).toBeTruthy(); + + // Clone the node + n.menu["Clone"].call(); + expect(graph.nodes).toHaveLength(2); + const clone = graph.nodes[1]; + expect(clone.id).not.toEqual(n.id); + + // Ensure the clone has an input + expect(clone.widgets.ckpt_name.isConvertedToInput).toBeTruthy(); + expect(clone.inputs.ckpt_name).toBeTruthy(); + + // Ensure primitive connects to both nodes + let primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(n.inputs.ckpt_name); + primitive.outputs[0].connectTo(clone.inputs.ckpt_name); + expect(primitive.outputs[0].connections).toHaveLength(2); + + // Convert back to widget and ensure input is removed + clone.widgets.ckpt_name.convertToWidget(); + expect(clone.widgets.ckpt_name.isConvertedToInput).toBeFalsy(); + expect(clone.inputs.ckpt_name).toBeFalsy(); + }); + + test("shows missing node error on custom node with converted input", async () => { + const { graph } = await start(); + + const dialogShow = 
jest.spyOn(graph.app.ui.dialog, "show"); + + await graph.app.loadGraphData({ + last_node_id: 3, + last_link_id: 4, + nodes: [ + { + id: 1, + type: "TestNode", + pos: [41.87329101561909, 389.7381480823742], + size: { 0: 220, 1: 374 }, + flags: {}, + order: 1, + mode: 0, + inputs: [{ name: "test", type: "FLOAT", link: 4, widget: { name: "test" }, slot_index: 0 }], + outputs: [], + properties: { "Node name for S&R": "TestNode" }, + widgets_values: [1], + }, + { + id: 3, + type: "PrimitiveNode", + pos: [-312, 433], + size: { 0: 210, 1: 82 }, + flags: {}, + order: 0, + mode: 0, + outputs: [{ links: [4], widget: { name: "test" } }], + title: "test", + properties: {}, + }, + ], + links: [[4, 3, 0, 1, 6, "FLOAT"]], + groups: [], + config: {}, + extra: {}, + version: 0.4, + }); + + expect(dialogShow).toBeCalledTimes(1); + expect(dialogShow.mock.calls[0][0]).toContain("the following node types were not found"); + expect(dialogShow.mock.calls[0][0]).toContain("TestNode"); + }); + + test("defaultInput widgets can be converted back to inputs", async () => { + const { graph, ez } = await start({ + mockNodeDefs: makeNodeDef("TestNode", { example: ["INT", { defaultInput: true }] }), + }); + + // Create test node and ensure it starts as an input + let n = ez.TestNode(); + let w = n.widgets.example; + expect(w.isConvertedToInput).toBeTruthy(); + let input = w.getConvertedInput(); + expect(input).toBeTruthy(); + + // Ensure it can be converted to + w.convertToWidget(); + expect(w.isConvertedToInput).toBeFalsy(); + expect(n.inputs.length).toEqual(0); + // and from + w.convertToInput(); + expect(w.isConvertedToInput).toBeTruthy(); + input = w.getConvertedInput(); + + // Reload and ensure it still only has 1 converted widget + if (!assertNotNullOrUndefined(input)) return; + + await connectPrimitiveAndReload(ez, graph, input, "number", true); + n = graph.find(n); + expect(n.widgets).toHaveLength(1); + w = n.widgets.example; + expect(w.isConvertedToInput).toBeTruthy(); + + // Convert back to widget and ensure it is still a widget after reload + w.convertToWidget(); + await graph.reload(); + n = graph.find(n); + expect(n.widgets).toHaveLength(1); + expect(n.widgets[0].isConvertedToInput).toBeFalsy(); + expect(n.inputs.length).toEqual(0); + }); + + test("forceInput widgets can not be converted back to inputs", async () => { + const { graph, ez } = await start({ + mockNodeDefs: makeNodeDef("TestNode", { example: ["INT", { forceInput: true }] }), + }); + + // Create test node and ensure it starts as an input + let n = ez.TestNode(); + let w = n.widgets.example; + expect(w.isConvertedToInput).toBeTruthy(); + const input = w.getConvertedInput(); + expect(input).toBeTruthy(); + + // Convert to widget should error + expect(() => w.convertToWidget()).toThrow(); + + // Reload and ensure it still only has 1 converted widget + if (assertNotNullOrUndefined(input)) { + await connectPrimitiveAndReload(ez, graph, input, "number", true); + n = graph.find(n); + expect(n.widgets).toHaveLength(1); + expect(n.widgets.example.isConvertedToInput).toBeTruthy(); + } + }); + + test("primitive can connect to matching combos on converted widgets", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", { example: [["A", "B", "C"], { forceInput: true }] }), + ...makeNodeDef("TestNode2", { example: [["A", "B", "C"], { forceInput: true }] }), + }, + }); + + const n1 = ez.TestNode1(); + const n2 = ez.TestNode2(); + const p = ez.PrimitiveNode(); + p.outputs[0].connectTo(n1.inputs[0]); + 
p.outputs[0].connectTo(n2.inputs[0]); + expect(p.outputs[0].connections).toHaveLength(2); + const valueWidget = p.widgets.value; + expect(valueWidget.widget.type).toBe("combo"); + expect(valueWidget.widget.options.values).toEqual(["A", "B", "C"]); + }); + + test("primitive can not connect to non matching combos on converted widgets", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", { example: [["A", "B", "C"], { forceInput: true }] }), + ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true }] }), + }, + }); + + const n1 = ez.TestNode1(); + const n2 = ez.TestNode2(); + const p = ez.PrimitiveNode(); + p.outputs[0].connectTo(n1.inputs[0]); + expect(() => p.outputs[0].connectTo(n2.inputs[0])).toThrow(); + expect(p.outputs[0].connections).toHaveLength(1); + }); + + test("combo output can not connect to non matching combos list input", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", {}, [["A", "B"]]), + ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true}] }), + ...makeNodeDef("TestNode3", { example: [["A", "B", "C"], { forceInput: true}] }), + }, + }); + + const n1 = ez.TestNode1(); + const n2 = ez.TestNode2(); + const n3 = ez.TestNode3(); + + n1.outputs[0].connectTo(n2.inputs[0]); + expect(() => n1.outputs[0].connectTo(n3.inputs[0])).toThrow(); + }); +}); diff --git a/tests-ui/utils/ezgraph.js b/tests-ui/utils/ezgraph.js new file mode 100644 index 00000000000..0e81fd47beb --- /dev/null +++ b/tests-ui/utils/ezgraph.js @@ -0,0 +1,417 @@ +// @ts-check +/// + +/** + * @typedef { import("../../web/scripts/app")["app"] } app + * @typedef { import("../../web/types/litegraph") } LG + * @typedef { import("../../web/types/litegraph").IWidget } IWidget + * @typedef { import("../../web/types/litegraph").ContextMenuItem } ContextMenuItem + * @typedef { import("../../web/types/litegraph").INodeInputSlot } INodeInputSlot + * @typedef { import("../../web/types/litegraph").INodeOutputSlot } INodeOutputSlot + * @typedef { InstanceType & { widgets?: Array } } LGNode + * @typedef { (...args: EzOutput[] | [...EzOutput[], Record]) => EzNode } EzNodeFactory + */ + +export class EzConnection { + /** @type { app } */ + app; + /** @type { InstanceType } */ + link; + + get originNode() { + return new EzNode(this.app, this.app.graph.getNodeById(this.link.origin_id)); + } + + get originOutput() { + return this.originNode.outputs[this.link.origin_slot]; + } + + get targetNode() { + return new EzNode(this.app, this.app.graph.getNodeById(this.link.target_id)); + } + + get targetInput() { + return this.targetNode.inputs[this.link.target_slot]; + } + + /** + * @param { app } app + * @param { InstanceType } link + */ + constructor(app, link) { + this.app = app; + this.link = link; + } + + disconnect() { + this.targetInput.disconnect(); + } +} + +export class EzSlot { + /** @type { EzNode } */ + node; + /** @type { number } */ + index; + + /** + * @param { EzNode } node + * @param { number } index + */ + constructor(node, index) { + this.node = node; + this.index = index; + } +} + +export class EzInput extends EzSlot { + /** @type { INodeInputSlot } */ + input; + + /** + * @param { EzNode } node + * @param { number } index + * @param { INodeInputSlot } input + */ + constructor(node, index, input) { + super(node, index); + this.input = input; + } + + disconnect() { + this.node.node.disconnectInput(this.index); + } +} + +export class EzOutput extends EzSlot { + /** @type { INodeOutputSlot } */ + output; + + 
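// Wraps a single litegraph output slot; `connections` resolves its outgoing links
+ 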
/** + * @param { EzNode } node + * @param { number } index + * @param { INodeOutputSlot } output + */ + constructor(node, index, output) { + super(node, index); + this.output = output; + } + + get connections() { + return (this.node.node.outputs?.[this.index]?.links ?? []).map( + (l) => new EzConnection(this.node.app, this.node.app.graph.links[l]) + ); + } + + /** + * @param { EzInput } input + */ + connectTo(input) { + if (!input) throw new Error("Invalid input"); + + /** + * @type { LG["LLink"] | null } + */ + const link = this.node.node.connect(this.index, input.node.node, input.index); + if (!link) { + const inp = input.input; + const inName = inp.name || inp.label || inp.type; + throw new Error( + `Connecting from ${input.node.node.type}[${inName}#${input.index}] -> ${this.node.node.type}[${ + this.output.name ?? this.output.type + }#${this.index}] failed.` + ); + } + return link; + } +} + +export class EzNodeMenuItem { + /** @type { EzNode } */ + node; + /** @type { number } */ + index; + /** @type { ContextMenuItem } */ + item; + + /** + * @param { EzNode } node + * @param { number } index + * @param { ContextMenuItem } item + */ + constructor(node, index, item) { + this.node = node; + this.index = index; + this.item = item; + } + + call(selectNode = true) { + if (!this.item?.callback) throw new Error(`Menu Item ${this.item?.content ?? "[null]"} has no callback.`); + if (selectNode) { + this.node.select(); + } + this.item.callback.call(this.node.node, undefined, undefined, undefined, undefined, this.node.node); + } +} + +export class EzWidget { + /** @type { EzNode } */ + node; + /** @type { number } */ + index; + /** @type { IWidget } */ + widget; + + /** + * @param { EzNode } node + * @param { number } index + * @param { IWidget } widget + */ + constructor(node, index, widget) { + this.node = node; + this.index = index; + this.widget = widget; + } + + get value() { + return this.widget.value; + } + + set value(v) { + this.widget.value = v; + } + + get isConvertedToInput() { + // @ts-ignore : this type is valid for converted widgets + return this.widget.type === "converted-widget"; + } + + getConvertedInput() { + if (!this.isConvertedToInput) throw new Error(`Widget ${this.widget.name} is not converted to input.`); + + return this.node.inputs.find((inp) => inp.input["widget"]?.name === this.widget.name); + } + + convertToWidget() { + if (!this.isConvertedToInput) + throw new Error(`Widget ${this.widget.name} cannot be converted as it is already a widget.`); + this.node.menu[`Convert ${this.widget.name} to widget`].call(); + } + + convertToInput() { + if (this.isConvertedToInput) + throw new Error(`Widget ${this.widget.name} cannot be converted as it is already an input.`); + this.node.menu[`Convert ${this.widget.name} to input`].call(); + } +} + +export class EzNode { + /** @type { app } */ + app; + /** @type { LGNode } */ + node; + + /** + * @param { app } app + * @param { LGNode } node + */ + constructor(app, node) { + this.app = app; + this.node = node; + } + + get id() { + return this.node.id; + } + + get inputs() { + return this.#makeLookupArray("inputs", "name", EzInput); + } + + get outputs() { + return this.#makeLookupArray("outputs", "name", EzOutput); + } + + get widgets() { + return this.#makeLookupArray("widgets", "name", EzWidget); + } + + get menu() { + return this.#makeLookupArray(() => this.app.canvas.getNodeMenuOptions(this.node), "content", EzNodeMenuItem); + } + + select() { + this.app.canvas.selectNode(this.node); + } + + // /** + // * @template { "inputs" | 
"outputs" } T + // * @param { T } type + // * @returns { Record & (type extends "inputs" ? EzInput [] : EzOutput[]) } + // */ + // #getSlotItems(type) { + // // @ts-ignore : these items are correct + // return (this.node[type] ?? []).reduce((p, s, i) => { + // if (s.name in p) { + // throw new Error(`Unable to store input ${s.name} on array as name conflicts.`); + // } + // // @ts-ignore + // p.push((p[s.name] = new (type === "inputs" ? EzInput : EzOutput)(this, i, s))); + // return p; + // }, Object.assign([], { $: this })); + // } + + /** + * @template { { new(node: EzNode, index: number, obj: any): any } } T + * @param { "inputs" | "outputs" | "widgets" | (() => Array) } nodeProperty + * @param { string } nameProperty + * @param { T } ctor + * @returns { Record> & Array> } + */ + #makeLookupArray(nodeProperty, nameProperty, ctor) { + const items = typeof nodeProperty === "function" ? nodeProperty() : this.node[nodeProperty]; + // @ts-ignore + return (items ?? []).reduce((p, s, i) => { + if (!s) return p; + + const name = s[nameProperty]; + // @ts-ignore + if (!name || name in p) { + throw new Error(`Unable to store ${nodeProperty} ${name} on array as name conflicts.`); + } + // @ts-ignore + p.push((p[name] = new ctor(this, i, s))); + return p; + }, Object.assign([], { $: this })); + } +} + +export class EzGraph { + /** @type { app } */ + app; + + /** + * @param { app } app + */ + constructor(app) { + this.app = app; + } + + get nodes() { + return this.app.graph._nodes.map((n) => new EzNode(this.app, n)); + } + + clear() { + this.app.graph.clear(); + } + + arrange() { + this.app.graph.arrange(); + } + + stringify() { + return JSON.stringify(this.app.graph.serialize(), undefined, "\t"); + } + + /** + * @param { number | LGNode | EzNode } obj + * @returns { EzNode } + */ + find(obj) { + let match; + let id; + if (typeof obj === "number") { + id = obj; + } else { + id = obj.id; + } + + match = this.app.graph.getNodeById(id); + + if (!match) { + throw new Error(`Unable to find node with ID ${id}.`); + } + + return new EzNode(this.app, match); + } + + /** + * @returns { Promise } + */ + reload() { + const graph = JSON.parse(JSON.stringify(this.app.graph.serialize())); + return new Promise((r) => { + this.app.graph.clear(); + setTimeout(async () => { + await this.app.loadGraphData(graph); + r(); + }, 10); + }); + } +} + +export const Ez = { + /** + * Quickly build and interact with a ComfyUI graph + * @example + * const { ez, graph } = Ez.graph(app); + * graph.clear(); + * const [model, clip, vae] = ez.CheckpointLoaderSimple(); + * const [pos] = ez.CLIPTextEncode(clip, { text: "positive" }); + * const [neg] = ez.CLIPTextEncode(clip, { text: "negative" }); + * const [latent] = ez.KSampler(model, pos, neg, ...ez.EmptyLatentImage()); + * const [image] = ez.VAEDecode(latent, vae); + * const saveNode = ez.SaveImage(image).node; + * console.log(saveNode); + * graph.arrange(); + * @param { app } app + * @param { LG["LiteGraph"] } LiteGraph + * @param { LG["LGraphCanvas"] } LGraphCanvas + * @param { boolean } clearGraph + * @returns { { graph: EzGraph, ez: Record } } + */ + graph(app, LiteGraph = window["LiteGraph"], LGraphCanvas = window["LGraphCanvas"], clearGraph = true) { + // Always set the active canvas so things work + LGraphCanvas.active_canvas = app.canvas; + + if (clearGraph) { + app.graph.clear(); + } + + // @ts-ignore : this proxy handles utility methods & node creation + const factory = new Proxy( + {}, + { + get(_, p) { + if (typeof p !== "string") throw new Error("Invalid node"); + 
const node = LiteGraph.createNode(p); + if (!node) throw new Error(`Unknown node "${p}"`); + app.graph.add(node); + + /** + * @param {Parameters} args + */ + return function (...args) { + const ezNode = new EzNode(app, node); + const inputs = ezNode.inputs; + + let slot = 0; + for (const arg of args) { + if (arg instanceof EzOutput) { + arg.connectTo(inputs[slot++]); + } else { + for (const k in arg) { + ezNode.widgets[k].value = arg[k]; + } + } + } + + return ezNode; + }; + }, + } + ); + + return { graph: new EzGraph(app), ez: factory }; + }, +}; diff --git a/tests-ui/utils/index.js b/tests-ui/utils/index.js new file mode 100644 index 00000000000..01c58b21f5c --- /dev/null +++ b/tests-ui/utils/index.js @@ -0,0 +1,71 @@ +const { mockApi } = require("./setup"); +const { Ez } = require("./ezgraph"); + +/** + * + * @param { Parameters[0] } config + * @returns + */ +export async function start(config = undefined) { + mockApi(config); + const { app } = require("../../web/scripts/app"); + await app.setup(); + return Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]); +} + +/** + * @param { ReturnType["graph"] } graph + * @param { (hasReloaded: boolean) => (Promise | void) } cb + */ +export async function checkBeforeAndAfterReload(graph, cb) { + await cb(false); + await graph.reload(); + await cb(true); +} + +/** + * @param { string } name + * @param { Record } input + * @param { (string | string[])[] | Record } output + * @returns { Record } + */ +export function makeNodeDef(name, input, output = {}) { + const nodeDef = { + name, + category: "test", + output: [], + output_name: [], + output_is_list: [], + input: { + required: {} + }, + }; + for(const k in input) { + nodeDef.input.required[k] = typeof input[k] === "string" ? [input[k], {}] : [...input[k]]; + } + if(output instanceof Array) { + output = output.reduce((p, c) => { + p[c] = c; + return p; + }, {}) + } + for(const k in output) { + nodeDef.output.push(output[k]); + nodeDef.output_name.push(k); + nodeDef.output_is_list.push(false); + } + + return { [name]: nodeDef }; +} + +/** +/** + * @template { any } T + * @param { T } x + * @returns { x is Exclude } + */ +export function assertNotNullOrUndefined(x) { + expect(x).not.toEqual(null); + expect(x).not.toEqual(undefined); + return true; +} \ No newline at end of file diff --git a/tests-ui/utils/litegraph.js b/tests-ui/utils/litegraph.js new file mode 100644 index 00000000000..777f8c3ba13 --- /dev/null +++ b/tests-ui/utils/litegraph.js @@ -0,0 +1,36 @@ +const fs = require("fs"); +const path = require("path"); +const { nop } = require("../utils/nopProxy"); + +function forEachKey(cb) { + for (const k of [ + "LiteGraph", + "LGraph", + "LLink", + "LGraphNode", + "LGraphGroup", + "DragAndScale", + "LGraphCanvas", + "ContextMenu", + ]) { + cb(k); + } +} + +export function setup(ctx) { + const lg = fs.readFileSync(path.resolve("../web/lib/litegraph.core.js"), "utf-8"); + const globalTemp = {}; + (function (console) { + eval(lg); + }).call(globalTemp, nop); + + forEachKey((k) => (ctx[k] = globalTemp[k])); + require(path.resolve("../web/lib/litegraph.extensions.js")); +} + +export function teardown(ctx) { + forEachKey((k) => delete ctx[k]); + + // Clear document after each run + document.getElementsByTagName("html")[0].innerHTML = ""; +} diff --git a/tests-ui/utils/nopProxy.js b/tests-ui/utils/nopProxy.js new file mode 100644 index 00000000000..2502d9d03d6 --- /dev/null +++ b/tests-ui/utils/nopProxy.js @@ -0,0 +1,6 @@ +export const nop = new Proxy(function () {}, { + get: () => nop, + set: 
() => true, + apply: () => nop, + construct: () => nop, +}); diff --git a/tests-ui/utils/setup.js b/tests-ui/utils/setup.js new file mode 100644 index 00000000000..17e8ac1ad28 --- /dev/null +++ b/tests-ui/utils/setup.js @@ -0,0 +1,45 @@ +require("../../web/scripts/api"); + +const fs = require("fs"); +const path = require("path"); +function* walkSync(dir) { + const files = fs.readdirSync(dir, { withFileTypes: true }); + for (const file of files) { + if (file.isDirectory()) { + yield* walkSync(path.join(dir, file.name)); + } else { + yield path.join(dir, file.name); + } + } +} + +/** + * @typedef { import("../../web/types/comfy").ComfyObjectInfo } ComfyObjectInfo + */ + +/** + * @param { { mockExtensions?: string[], mockNodeDefs?: Record } } config + */ +export function mockApi({ mockExtensions, mockNodeDefs } = {}) { + if (!mockExtensions) { + mockExtensions = Array.from(walkSync(path.resolve("../web/extensions/core"))) + .filter((x) => x.endsWith(".js")) + .map((x) => path.relative(path.resolve("../web"), x)); + } + if (!mockNodeDefs) { + mockNodeDefs = JSON.parse(fs.readFileSync(path.resolve("./data/object_info.json"))); + } + + jest.mock("../../web/scripts/api", () => ({ + get api() { + return { + addEventListener: jest.fn(), + getSystemStats: jest.fn(), + getExtensions: jest.fn(() => mockExtensions), + getNodeDefs: jest.fn(() => mockNodeDefs), + init: jest.fn(), + apiURL: jest.fn((x) => "../../web/" + x), + }; + }, + })); +} diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index ce05a29e9ed..84abd8b7d25 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -100,6 +100,27 @@ function getWidgetType(config) { return { type }; } + +function isValidCombo(combo, obj) { + // New input isnt a combo + if (!(obj instanceof Array)) { + console.log(`connection rejected: tried to connect combo to ${obj}`); + return false; + } + // New imput combo has a different size + if (combo.length !== obj.length) { + console.log(`connection rejected: combo lists dont match`); + return false; + } + // New input combo has different elements + if (combo.find((v, i) => obj[i] !== v)) { + console.log(`connection rejected: combo lists dont match`); + return false; + } + + return true; +} + app.registerExtension({ name: "Comfy.WidgetInputs", async beforeRegisterNodeDef(nodeType, nodeData, app) { @@ -256,6 +277,28 @@ app.registerExtension({ return r; }; + + // Prevent connecting COMBO lists to converted inputs that dont match types + const onConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onConnectInput?.(this, arguments); + // Not a combo, ignore + if (type !== "COMBO") return v; + // Primitive output, allow that to handle + if (originNode.outputs[originSlot].widget) return v; + + // Ensure target is also a combo + const targetCombo = this.inputs[targetSlot].widget?.[GET_CONFIG]?.()?.[0]; + if (!targetCombo || !(targetCombo instanceof Array)) return v; + + // Check they match + const originConfig = originNode.constructor?.nodeData?.output?.[originSlot]; + if (!originConfig || !isValidCombo(targetCombo, originConfig)) { + return false; + } + + return v; + }; }, registerCustomNodes() { class PrimitiveNode { @@ -315,7 +358,7 @@ app.registerExtension({ onAfterGraphConfigured() { if (this.outputs[0].links?.length && !this.widgets?.length) { - this.#onFirstConnection(); + if (!this.#onFirstConnection()) return; // Populate widget 
values from config data if (this.widgets) { @@ -386,13 +429,16 @@ app.registerExtension({ widget = input.widget; } - const { type } = getWidgetType(widget[GET_CONFIG]()); + const config = widget[GET_CONFIG]?.(); + if (!config) return; + + const { type } = getWidgetType(config); // Update our output to restrict to the widget type this.outputs[0].type = type; this.outputs[0].name = type; this.outputs[0].widget = widget; - this.#createWidget(widget[CONFIG] ?? widget[GET_CONFIG](), theirNode, widget.name, recreating); + this.#createWidget(widget[CONFIG] ?? config, theirNode, widget.name, recreating); } #createWidget(inputData, node, widgetName, recreating) { @@ -497,21 +543,7 @@ app.registerExtension({ const config2 = input.widget[GET_CONFIG](); if (config1[0] instanceof Array) { - // New input isnt a combo - if (!(config2[0] instanceof Array)) { - console.log(`connection rejected: tried to connect combo to ${config2[0]}`); - return false; - } - // New imput combo has a different size - if (config1[0].length !== config2[0].length) { - console.log(`connection rejected: combo lists dont match`); - return false; - } - // New input combo has different elements - if (config1[0].find((v, i) => config2[0][i] !== v)) { - console.log(`connection rejected: combo lists dont match`); - return false; - } + if (!isValidCombo(config1[0], config2[0])) return false; } else if (config1[0] !== config2[0]) { // Types dont match console.log(`connection rejected: types dont match`, config1[0], config2[0]); From 25e3e5af6850119c506efc49ab0364b5bb9aa0d0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 22:52:12 -0400 Subject: [PATCH 113/420] Use npm ci for ci instead of npm install in tests. --- .github/workflows/test-ui.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml index 62b4c35f658..292ff5c6328 100644 --- a/.github/workflows/test-ui.yaml +++ b/.github/workflows/test-ui.yaml @@ -19,7 +19,7 @@ jobs: pip install -r requirements.txt - name: Run Tests run: | - npm install + npm ci npm run test:generate npm test working-directory: ./tests-ui From e0c0029fc1e76beed9dd61176b33fc25796a7d57 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 23:00:05 -0400 Subject: [PATCH 114/420] Try to speed up the test-ui workflow. --- .github/workflows/test-ui.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml index 292ff5c6328..3e96ac18f57 100644 --- a/.github/workflows/test-ui.yaml +++ b/.github/workflows/test-ui.yaml @@ -16,7 +16,7 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip - pip install -r requirements.txt + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu -r requirements.txt - name: Run Tests run: | npm ci From 77c893350a7d9b28c25356f90a0ba9981b3771f9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 20 Oct 2023 23:13:54 -0400 Subject: [PATCH 115/420] Fix previous commit that broke tests. 
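Root cause: the previous commit folded the requirements install into the same pip invocation as the PyTorch CPU wheels, so the custom --index-url presumably applied to every package in requirements.txt as well, and anything not published on the PyTorch index failed to resolve. Splitting the install into two commands scopes the custom index to torch/torchvision/torchaudio only.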
--- .github/workflows/test-ui.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml index 3e96ac18f57..95069175517 100644 --- a/.github/workflows/test-ui.yaml +++ b/.github/workflows/test-ui.yaml @@ -16,7 +16,8 @@ jobs: - name: Install requirements run: | python -m pip install --upgrade pip - pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu -r requirements.txt + pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu + pip install -r requirements.txt - name: Run Tests run: | npm ci From 1443caf373c704244b11eb4113af68d353c741d4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 05:16:38 -0400 Subject: [PATCH 116/420] HyperTile node, can be found in: _for_testing->HyperTile --- comfy_extras/nodes_hypertile.py | 83 +++++++++++++++++++++++++++++++++ nodes.py | 3 +- 2 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_hypertile.py diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py new file mode 100644 index 00000000000..0d7d4c95483 --- /dev/null +++ b/comfy_extras/nodes_hypertile.py @@ -0,0 +1,83 @@ +#Taken from: https://github.com/tfernd/HyperTile/ + +import math +from einops import rearrange +import random + +def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int: + min_value = min(min_value, value) + + # All big divisors of value (inclusive) + divisors = [i for i in range(min_value, value + 1) if value % i == 0] + + ns = [value // i for i in divisors[:max_options]] # has at least 1 element + + random.seed(counter) + idx = random.randint(0, len(ns) - 1) + + return ns[idx] + +class HyperTile: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}), + "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}), + "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}), + "scale_depth": ("BOOLEAN", {"default": False}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, tile_size, swap_size, max_depth, scale_depth): + model_channels = model.model.model_config.unet_config["model_channels"] + + apply_to = set() + temp = model_channels + for x in range(max_depth + 1): + apply_to.add(temp) + temp *= 2 + + latent_tile_size = max(32, tile_size) // 8 + self.temp = None + self.counter = 1 + + def hypertile_in(q, k, v, extra_options): + if q.shape[-1] in apply_to: + shape = extra_options["original_shape"] + aspect_ratio = shape[-1] / shape[-2] + + hw = q.size(1) + h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) + + factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1 + nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter) + self.counter += 1 + nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter) + self.counter += 1 + + if nh * nw > 1: + q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw) + self.temp = (nh, nw, h, w) + return q, k, v + + return q, k, v + def hypertile_out(out, extra_options): + if self.temp is not None: + nh, nw, h, w = self.temp + self.temp = None + out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw) + out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw) + return out + + + m = model.clone() + 
m.set_model_attn1_patch(hypertile_in) + m.set_model_attn1_output_patch(hypertile_out) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "HyperTile": HyperTile, +} diff --git a/nodes.py b/nodes.py index 0dbc2be32fd..61ebbb8b49e 100644 --- a/nodes.py +++ b/nodes.py @@ -1796,7 +1796,8 @@ def init_custom_nodes(): "nodes_clip_sdxl.py", "nodes_canny.py", "nodes_freelunch.py", - "nodes_custom_sampler.py" + "nodes_custom_sampler.py", + "nodes_hypertile.py", ] for node_file in extras_files: From 9906e3efe31a0fc399262766da33c210fb4e8215 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 13:23:03 -0400 Subject: [PATCH 117/420] Make xformers work with hypertile. --- comfy/ldm/modules/attention.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 9cd14a537a4..4eda361f3b5 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -253,12 +253,14 @@ def attention_split(q, k, v, heads, mask=None): return r2 def attention_xformers(q, k, v, heads, mask=None): - b, _, _ = q.shape + b, _, dim_head = q.shape + dim_head //= heads + q, k, v = map( lambda t: t.unsqueeze(3) - .reshape(b, t.shape[1], heads, -1) + .reshape(b, -1, heads, dim_head) .permute(0, 2, 1, 3) - .reshape(b * heads, t.shape[1], -1) + .reshape(b * heads, -1, dim_head) .contiguous(), (q, k, v), ) @@ -270,9 +272,9 @@ def attention_xformers(q, k, v, heads, mask=None): raise NotImplementedError out = ( out.unsqueeze(0) - .reshape(b, heads, out.shape[1], -1) + .reshape(b, heads, -1, dim_head) .permute(0, 2, 1, 3) - .reshape(b, out.shape[1], -1) + .reshape(b, -1, heads * dim_head) ) return out From a0690f9df9e731ff31fb9b0d64f1fe7cbc918789 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 20:31:24 -0400 Subject: [PATCH 118/420] Fix t2i adapter issue. --- comfy/controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 73a40acfa24..f1355e64e9d 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -416,7 +416,7 @@ def get_control(self, x_noisy, t, cond, batched_number): if control_prev is not None: return control_prev else: - return {} + return None if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]: if self.cond_hint is not None: From 8cfce083c4eb09ea95bce59f65f1634e09d12b13 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 21 Oct 2023 22:36:04 -0400 Subject: [PATCH 119/420] Fix primitive node control value not getting loaded. --- web/extensions/core/widgetInputs.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 84abd8b7d25..bad3ac3a74c 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -463,7 +463,11 @@ app.registerExtension({ } if (widget.type === "number" || widget.type === "combo") { - addValueControlWidget(this, widget, "fixed"); + let control_value = this.widgets_values?.[1]; + if (!control_value) { + control_value = "fixed"; + } + addValueControlWidget(this, widget, control_value); } // When our value changes, update other widgets to reflect our changes From e6bc42df4662e571365ffbafe7c2dfac2cee3116 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 03:51:29 -0400 Subject: [PATCH 120/420] Make sub_quad and split work with hypertile. 
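As with the xformers change, attention_split and attention_sub_quad can no longer infer the head size from each tensor independently: HyperTile's attn1 patch hands them a q that has been tiled into more batch entries with fewer tokens, while k and v keep their original shape. Deriving dim_head once from q (q.shape[-1] // heads) and reshaping every tensor with -1 on the token axis makes the shapes line up again. A rough sketch of the arithmetic, with an illustrative tile factor r (these names are not from the code):

    import torch

    b_orig, n, heads, dim_head, r = 2, 64, 8, 40, 4
    q = torch.randn(b_orig * r, n // r, heads * dim_head)  # tiled by hypertile
    k = torch.randn(b_orig, n, heads * dim_head)           # left untouched
    b = q.shape[0]

    def fold(t):  # the reshape pattern used by the patched attention functions
        return t.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)

    print(fold(q).shape, fold(k).shape)  # both torch.Size([64, 16, 40])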
--- comfy/ldm/modules/attention.py | 41 ++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 4eda361f3b5..f8391e19a0d 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -124,11 +124,14 @@ def attention_basic(q, k, v, heads, mask=None): def attention_sub_quad(query, key, value, heads, mask=None): - scale = (query.shape[-1] // heads) ** -0.5 - query = query.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1) - key_t = key.transpose(1,2).unflatten(1, (heads, -1)).flatten(end_dim=1) - del key - value = value.unflatten(-1, (heads, -1)).transpose(1,2).flatten(end_dim=1) + b, _, dim_head = query.shape + dim_head //= heads + + scale = dim_head ** -0.5 + query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head) + value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head) + + key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1) dtype = query.dtype upcast_attention = _ATTN_PRECISION =="fp32" and query.dtype != torch.float32 @@ -137,7 +140,7 @@ def attention_sub_quad(query, key, value, heads, mask=None): else: bytes_per_token = torch.finfo(query.dtype).bits//8 batch_x_heads, q_tokens, _ = query.shape - _, _, k_tokens = key_t.shape + _, _, k_tokens = key.shape qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True) @@ -171,7 +174,7 @@ def attention_sub_quad(query, key, value, heads, mask=None): hidden_states = efficient_dot_product_attention( query, - key_t, + key, value, query_chunk_size=query_chunk_size, kv_chunk_size=kv_chunk_size, @@ -186,9 +189,19 @@ def attention_sub_quad(query, key, value, heads, mask=None): return hidden_states def attention_split(q, k, v, heads, mask=None): - scale = (q.shape[-1] // heads) ** -0.5 + b, _, dim_head = q.shape + dim_head //= heads + scale = dim_head ** -0.5 + h = heads - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, -1, heads, dim_head) + .permute(0, 2, 1, 3) + .reshape(b * heads, -1, dim_head) + .contiguous(), + (q, k, v), + ) r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) @@ -248,9 +261,13 @@ def attention_split(q, k, v, heads, mask=None): del q, k, v - r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) - del r1 - return r2 + r1 = ( + r1.unsqueeze(0) + .reshape(b, heads, -1, dim_head) + .permute(0, 2, 1, 3) + .reshape(b, -1, heads * dim_head) + ) + return r1 def attention_xformers(q, k, v, heads, mask=None): b, _, dim_head = q.shape From 8b65f5de54426f25cc7c08928332e5b7bf0fd25f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 03:59:53 -0400 Subject: [PATCH 121/420] attention_basic now works with hypertile. 
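This is the same head-split rewrite applied to attention_basic; for ordinary (untiled) batches the new reshape chain is numerically identical to the einops rearrange it replaces, which a quick check confirms (sizes below are arbitrary):

    import torch
    from einops import rearrange

    b, n, heads, dim_head = 2, 16, 8, 40
    t = torch.randn(b, n, heads * dim_head)
    new = t.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
    old = rearrange(t, "b n (h d) -> (b h) n d", h=heads)
    assert torch.equal(new.contiguous(), old)
    # and the output merge is the exact inverse:
    back = new.unsqueeze(0).reshape(b, heads, -1, dim_head).permute(0, 2, 1, 3).reshape(b, -1, heads * dim_head)
    assert torch.equal(back, t)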
--- comfy/ldm/modules/attention.py | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index f8391e19a0d..dcf467489fe 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -95,9 +95,19 @@ def Normalize(in_channels, dtype=None, device=None): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) def attention_basic(q, k, v, heads, mask=None): + b, _, dim_head = q.shape + dim_head //= heads + scale = dim_head ** -0.5 + h = heads - scale = (q.shape[-1] // heads) ** -0.5 - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, -1, heads, dim_head) + .permute(0, 2, 1, 3) + .reshape(b * heads, -1, dim_head) + .contiguous(), + (q, k, v), + ) # force cast to fp32 to avoid overflowing if _ATTN_PRECISION =="fp32": @@ -119,7 +129,12 @@ def attention_basic(q, k, v, heads, mask=None): sim = sim.softmax(dim=-1) out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + out = ( + out.unsqueeze(0) + .reshape(b, heads, -1, dim_head) + .permute(0, 2, 1, 3) + .reshape(b, -1, heads * dim_head) + ) return out From 8594c8be4d8c0d7c9b5eb3d69d0c96cc80cffcc4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 13:53:59 -0400 Subject: [PATCH 122/420] Empty the cache when torch cache is more than 25% free mem. --- comfy/model_management.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 64ed19727f4..53582fc736d 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -339,7 +339,11 @@ def free_memory(memory_required, device, keep_loaded=[]): if unloaded_model: soft_empty_cache() - + else: + if vram_state != VRAMState.HIGH_VRAM: + mem_free_total, mem_free_torch = get_free_memory(device, torch_free_too=True) + if mem_free_torch > mem_free_total * 0.25: + soft_empty_cache() def load_models_gpu(models, memory_required=0): global vram_state From 2ec6158e9e10cc5e1cc4b27b2930b75167db20de Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 22 Oct 2023 23:38:18 -0400 Subject: [PATCH 123/420] Call widget callback on value control to fix primitive node issue. --- web/scripts/widgets.js | 1 + 1 file changed, 1 insertion(+) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 2b023937415..2b674776937 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -84,6 +84,7 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random if (targetWidget.value > max) targetWidget.value = max; + targetWidget.callback(targetWidget.value); } } return valueControl; From b935bea3a0201221eca7b0337bc60a329871300a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 23 Oct 2023 21:13:50 -0400 Subject: [PATCH 124/420] The frontend can now load workflows from webp exif. 
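WEBP is a RIFF container: after the 12-byte header (ASCII "RIFF", a 32-bit size, ASCII "WEBP") the file is a sequence of chunks, each a 4-byte type followed by a 32-bit little-endian length and the payload. The workflow is carried in the EXIF chunk as ASCII string tags of the form "workflow:{json}", which getWebpMetadata splits on the first colon. A rough Python equivalent of the chunk walk, for illustration only (the shipped parser is the JavaScript below):

    import struct

    def webp_chunks(data: bytes):
        # 'RIFF' <u32 size> 'WEBP', then <fourcc> <u32 little-endian length> <payload> ...
        assert data[:4] == b"RIFF" and data[8:12] == b"WEBP", "not a WEBP file"
        offset = 12
        while offset + 8 <= len(data):
            fourcc = data[offset:offset + 4]
            (length,) = struct.unpack_from("<I", data, offset + 4)
            yield fourcc, data[offset + 8:offset + 8 + length]
            offset += 8 + length  # note: the RIFF spec pads odd chunks to even length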
--- web/scripts/app.js | 11 ++++- web/scripts/pnginfo.js | 103 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 113 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 1d3b573b117..fca5b5bd31e 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -3,7 +3,7 @@ import { ComfyWidgets } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; -import { getPngMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; +import { getPngMetadata, getWebpMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; /** * @typedef {import("types/comfy").ComfyExtension} ComfyExtension @@ -1790,6 +1790,15 @@ export class ComfyApp { importA1111(this.graph, pngInfo.parameters); } } + } else if (file.type === "image/webp") { + const pngInfo = await getWebpMetadata(file); + if (pngInfo) { + if (pngInfo.workflow) { + this.loadGraphData(JSON.parse(pngInfo.workflow)); + } else if (pngInfo.Workflow) { + this.loadGraphData(JSON.parse(pngInfo.Workflow)); // Support loading workflows from that webp custom node. + } + } } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); reader.onload = () => { diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index c5293dfa332..42573daa0f2 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -47,6 +47,109 @@ export function getPngMetadata(file) { }); } +function parseExifData(exifData) { + // Check for the correct TIFF header (0x4949 for little-endian or 0x4D4D for big-endian) + const isLittleEndian = new Uint16Array(exifData.slice(0, 2))[0] === 0x4949; + console.log(exifData); + + // Function to read 16-bit and 32-bit integers from binary data + function readInt(offset, isLittleEndian, length) { + let arr = exifData.slice(offset, offset + length) + if (length === 2) { + return new DataView(arr.buffer, arr.byteOffset, arr.byteLength).getUint16(0, isLittleEndian); + } else if (length === 4) { + return new DataView(arr.buffer, arr.byteOffset, arr.byteLength).getUint32(0, isLittleEndian); + } + } + + // Read the offset to the first IFD (Image File Directory) + const ifdOffset = readInt(4, isLittleEndian, 4); + + function parseIFD(offset) { + const numEntries = readInt(offset, isLittleEndian, 2); + const result = {}; + + for (let i = 0; i < numEntries; i++) { + const entryOffset = offset + 2 + i * 12; + const tag = readInt(entryOffset, isLittleEndian, 2); + const type = readInt(entryOffset + 2, isLittleEndian, 2); + const numValues = readInt(entryOffset + 4, isLittleEndian, 4); + const valueOffset = readInt(entryOffset + 8, isLittleEndian, 4); + + // Read the value(s) based on the data type + let value; + if (type === 2) { + // ASCII string + value = String.fromCharCode(...exifData.slice(valueOffset, valueOffset + numValues - 1)); + } + + result[tag] = value; + } + + return result; + } + + // Parse the first IFD + const ifdData = parseIFD(ifdOffset); + return ifdData; +} + +function splitValues(input) { + var output = {}; + for (var key in input) { + var value = input[key]; + var splitValues = value.split(':', 2); + output[splitValues[0]] = splitValues[1]; + } + return output; +} + +export function getWebpMetadata(file) { + return new Promise((r) => { + const reader = new FileReader(); + reader.onload = (event) => { + // Get the PNG data as a Uint8Array + const pngData = new Uint8Array(event.target.result); + const dataView = new DataView(pngData.buffer); + 
+ // Check that the PNG signature is present + if (dataView.getUint32(0) !== 0x52494646 || dataView.getUint32(8) !== 0x57454250) { + console.error("Not a valid WEBP file"); + r(); + return; + } + + // Start searching for chunks after the PNG signature + let offset = 12; + let txt_chunks = {}; + // Loop through the chunks in the PNG file + while (offset < pngData.length) { + // Get the length of the chunk + const length = dataView.getUint32(offset + 4, true); + // Get the chunk type + const type = String.fromCharCode(...pngData.slice(offset, offset + 4)); + console.log(length, type); + if (type === "EXIF") { + // Get the keyword + let data = parseExifData(pngData.slice(offset + 8, offset + 8 + length)); + for (var key in data) { + var value = data[key]; + let index = value.indexOf(':'); + txt_chunks[value.slice(0, index)] = value.slice(index + 1); + } + } + + offset += 8 + length; + } + + console.log(txt_chunks); + r(txt_chunks); + }; + + reader.readAsArrayBuffer(file); + }); +} + export function getLatentMetadata(file) { return new Promise((r) => { const reader = new FileReader(); From 5c65da312a69ddbc34a2a1384b1118fd4e21776e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 23 Oct 2023 23:39:22 -0400 Subject: [PATCH 125/420] Remove prints. --- web/scripts/pnginfo.js | 2 -- 1 file changed, 2 deletions(-) diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index 42573daa0f2..4dc3a032c3c 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -128,7 +128,6 @@ export function getWebpMetadata(file) { const length = dataView.getUint32(offset + 4, true); // Get the chunk type const type = String.fromCharCode(...pngData.slice(offset, offset + 4)); - console.log(length, type); if (type === "EXIF") { // Get the keyword let data = parseExifData(pngData.slice(offset + 8, offset + 8 + length)); @@ -142,7 +141,6 @@ export function getWebpMetadata(file) { offset += 8 + length; } - console.log(txt_chunks); r(txt_chunks); }; From 3fce8881ca0f24e268fac1dc6e85d2b4cbdb0355 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 24 Oct 2023 03:38:41 -0400 Subject: [PATCH 126/420] Sampling code refactor to make it easier to add more conds. 
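The shape-equality and lcm-padding rules that were previously hardcoded per key in cond_equal_size/cond_cat move into small wrapper classes: each cond value now knows whether it can be batched with another (can_concat) and how to do it (concat), so adding a new cond type only needs a new class. For cross-attention conds the old padding behavior is kept; roughly, assuming the usual 77-token CLIP multiples:

    import torch

    a = torch.randn(1, 77, 768)   # one CLIP prompt chunk
    b = torch.randn(1, 154, 768)  # two concatenated chunks
    # lcm(77, 154) = 154, so a is tiled along the token axis before batching;
    # repeating a cross-attention context does not change the attention result
    batched = torch.cat([a.repeat(1, 154 // 77, 1), b])  # shape (2, 154, 768)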
--- comfy/samplers.py | 107 ++++++++++++++++++++++++++++------------------ 1 file changed, 65 insertions(+), 42 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 0b38fbd1e86..f88b790d8f4 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -9,9 +9,58 @@ from comfy import model_base import comfy.utils + def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) return abs(a*b) // math.gcd(a, b) +class CONDRegular: + def __init__(self, cond): + self.cond = cond + + def can_concat(self, other): + if self.cond.shape != other.cond.shape: + return False + return True + + def concat(self, others): + conds = [self.cond] + for x in others: + conds.append(x.cond) + return torch.cat(conds) + +class CONDCrossAttn: + def __init__(self, cond): + self.cond = cond + + def can_concat(self, other): + s1 = self.cond.shape + s2 = other.cond.shape + if s1 != s2: + if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen + return False + + mult_min = lcm(s1[1], s2[1]) + diff = mult_min // min(s1[1], s2[1]) + if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much + return False + return True + + def concat(self, others): + conds = [self.cond] + crossattn_max_len = self.cond.shape[1] + for x in others: + c = x.cond + crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) + conds.append(c) + + out = [] + for c in conds: + if c.shape[1] < crossattn_max_len: + c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result + out.append(c) + return torch.cat(out) + + #The main sampling function shared by all the samplers #Returns predicted noise def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): @@ -67,7 +116,7 @@ def get_area_and_mult(cond, x_in, timestep_in): mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) conditionning = {} - conditionning['c_crossattn'] = cond[0] + conditionning['c_crossattn'] = CONDCrossAttn(cond[0]) if 'concat' in cond[1]: cond_concat_in = cond[1]['concat'] @@ -76,10 +125,10 @@ def get_area_and_mult(cond, x_in, timestep_in): for x in cond_concat_in: cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] cropped.append(cr) - conditionning['c_concat'] = torch.cat(cropped, dim=1) + conditionning['c_concat'] = CONDRegular(torch.cat(cropped, dim=1)) if adm_cond is not None: - conditionning['c_adm'] = adm_cond + conditionning['c_adm'] = CONDRegular(adm_cond) control = None if 'control' in cond[1]: @@ -105,22 +154,8 @@ def cond_equal_size(c1, c2): return True if c1.keys() != c2.keys(): return False - if 'c_crossattn' in c1: - s1 = c1['c_crossattn'].shape - s2 = c2['c_crossattn'].shape - if s1 != s2: - if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen - return False - - mult_min = lcm(s1[1], s2[1]) - diff = mult_min // min(s1[1], s2[1]) - if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much - return False - if 'c_concat' in c1: - if c1['c_concat'].shape != c2['c_concat'].shape: - return False - if 'c_adm' in c1: - if c1['c_adm'].shape != c2['c_adm'].shape: + for k in c1: + if not c1[k].can_concat(c2[k]): return False return True @@ -149,31 +184,19 @@ def cond_cat(c_list): c_concat = [] c_adm = [] crossattn_max_len = 0 + + temp = {} for x in c_list: - if 'c_crossattn' in x: - c = x['c_crossattn'] - if crossattn_max_len == 0: - crossattn_max_len = c.shape[1] - else: - crossattn_max_len = 
lcm(crossattn_max_len, c.shape[1]) - c_crossattn.append(c) - if 'c_concat' in x: - c_concat.append(x['c_concat']) - if 'c_adm' in x: - c_adm.append(x['c_adm']) + for k in x: + cur = temp.get(k, []) + cur.append(x[k]) + temp[k] = cur + out = {} - c_crossattn_out = [] - for c in c_crossattn: - if c.shape[1] < crossattn_max_len: - c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result - c_crossattn_out.append(c) - - if len(c_crossattn_out) > 0: - out['c_crossattn'] = torch.cat(c_crossattn_out) - if len(c_concat) > 0: - out['c_concat'] = torch.cat(c_concat) - if len(c_adm) > 0: - out['c_adm'] = torch.cat(c_adm) + for k in temp: + conds = temp[k] + out[k] = conds[0].concat(conds[1:]) + return out def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): From 036f88c62166a750ecfc88175d2f6836c5707e3b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 24 Oct 2023 23:31:12 -0400 Subject: [PATCH 127/420] Refactor to make it easier to add custom conds to models. --- comfy/conds.py | 64 ++++++++++++ comfy/model_base.py | 14 ++- comfy/sample.py | 31 +++--- comfy/samplers.py | 234 +++++++++++++++----------------------------- 4 files changed, 170 insertions(+), 173 deletions(-) create mode 100644 comfy/conds.py diff --git a/comfy/conds.py b/comfy/conds.py new file mode 100644 index 00000000000..1e3111baff8 --- /dev/null +++ b/comfy/conds.py @@ -0,0 +1,64 @@ +import enum +import torch +import math +import comfy.utils + + +def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) + return abs(a*b) // math.gcd(a, b) + +class CONDRegular: + def __init__(self, cond): + self.cond = cond + + def _copy_with(self, cond): + return self.__class__(cond) + + def process_cond(self, batch_size, device, **kwargs): + return self._copy_with(comfy.utils.repeat_to_batch_size(self.cond, batch_size).to(device)) + + def can_concat(self, other): + if self.cond.shape != other.cond.shape: + return False + return True + + def concat(self, others): + conds = [self.cond] + for x in others: + conds.append(x.cond) + return torch.cat(conds) + +class CONDNoiseShape(CONDRegular): + def process_cond(self, batch_size, device, area, **kwargs): + data = self.cond[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] + return self._copy_with(comfy.utils.repeat_to_batch_size(data, batch_size).to(device)) + + +class CONDCrossAttn(CONDRegular): + def can_concat(self, other): + s1 = self.cond.shape + s2 = other.cond.shape + if s1 != s2: + if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen + return False + + mult_min = lcm(s1[1], s2[1]) + diff = mult_min // min(s1[1], s2[1]) + if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much + return False + return True + + def concat(self, others): + conds = [self.cond] + crossattn_max_len = self.cond.shape[1] + for x in others: + c = x.cond + crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) + conds.append(c) + + out = [] + for c in conds: + if c.shape[1] < crossattn_max_len: + c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result + out.append(c) + return torch.cat(out) diff --git a/comfy/model_base.py b/comfy/model_base.py index cda6765e43a..edc246f8c94 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -4,6 +4,7 @@ from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule from comfy.ldm.modules.diffusionmodules.openaimodel import Timestep 
import comfy.model_management +import comfy.conds import numpy as np from enum import Enum from . import utils @@ -49,7 +50,7 @@ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps= self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}): + def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}, **kwargs): if c_concat is not None: xc = torch.cat([x] + [c_concat], dim=1) else: @@ -72,7 +73,8 @@ def is_adm(self): def encode_adm(self, **kwargs): return None - def cond_concat(self, **kwargs): + def extra_conds(self, **kwargs): + out = {} if self.inpaint_model: concat_keys = ("mask", "masked_image") cond_concat = [] @@ -101,8 +103,12 @@ def blank_inpaint_image_like(latent_image): cond_concat.append(torch.ones_like(noise)[:,:1]) elif ck == "masked_image": cond_concat.append(blank_inpaint_image_like(noise)) - return cond_concat - return None + data = torch.cat(cond_concat, dim=1) + out['c_concat'] = comfy.conds.CONDNoiseShape(data) + adm = self.encode_adm(**kwargs) + if adm is not None: + out['c_adm'] = comfy.conds.CONDRegular(adm) + return out def load_model_weights(self, sd, unet_prefix=""): to_load = {} diff --git a/comfy/sample.py b/comfy/sample.py index e6a69973d93..b3fcd1658a5 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -1,6 +1,7 @@ import torch import comfy.model_management import comfy.samplers +import comfy.conds import comfy.utils import math import numpy as np @@ -33,22 +34,24 @@ def prepare_mask(noise_mask, shape, device): noise_mask = noise_mask.to(device) return noise_mask -def broadcast_cond(cond, batch, device): - """broadcasts conditioning to the batch size""" - copy = [] - for p in cond: - t = comfy.utils.repeat_to_batch_size(p[0], batch) - t = t.to(device) - copy += [[t] + p[1:]] - return copy - def get_models_from_cond(cond, model_type): models = [] for c in cond: - if model_type in c[1]: - models += [c[1][model_type]] + if model_type in c: + models += [c[model_type]] return models +def convert_cond(cond): + out = [] + for c in cond: + temp = c[1].copy() + model_conds = temp.get("model_conds", {}) + if c[0] is not None: + model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) + temp["model_conds"] = model_conds + out.append(temp) + return out + def get_additional_models(positive, negative, dtype): """loads additional models in positive and negative conditioning""" control_nets = set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control")) @@ -72,6 +75,8 @@ def cleanup_additional_models(models): def prepare_sampling(model, noise_shape, positive, negative, noise_mask): device = model.load_device + positive = convert_cond(positive) + negative = convert_cond(negative) if noise_mask is not None: noise_mask = prepare_mask(noise_mask, noise_shape, device) @@ -81,9 +86,7 @@ def prepare_sampling(model, noise_shape, positive, negative, noise_mask): comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise_shape[0] * noise_shape[2] * noise_shape[3]) + inference_memory) real_model = model.model - positive_copy = broadcast_cond(positive, noise_shape[0], device) - negative_copy = broadcast_cond(negative, noise_shape[0], device) - return real_model, positive_copy, 
negative_copy, noise_mask, models + return real_model, positive, negative, noise_mask, models def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, noise_mask=None, sigmas=None, callback=None, disable_pbar=False, seed=None): diff --git a/comfy/samplers.py b/comfy/samplers.py index f88b790d8f4..f930aa39bb3 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -2,96 +2,44 @@ from .k_diffusion import external as k_diffusion_external from .extra_samplers import uni_pc import torch +import enum from comfy import model_management from .ldm.models.diffusion.ddim import DDIMSampler from .ldm.modules.diffusionmodules.util import make_ddim_timesteps import math from comfy import model_base import comfy.utils - - -def lcm(a, b): #TODO: eventually replace by math.lcm (added in python3.9) - return abs(a*b) // math.gcd(a, b) - -class CONDRegular: - def __init__(self, cond): - self.cond = cond - - def can_concat(self, other): - if self.cond.shape != other.cond.shape: - return False - return True - - def concat(self, others): - conds = [self.cond] - for x in others: - conds.append(x.cond) - return torch.cat(conds) - -class CONDCrossAttn: - def __init__(self, cond): - self.cond = cond - - def can_concat(self, other): - s1 = self.cond.shape - s2 = other.cond.shape - if s1 != s2: - if s1[0] != s2[0] or s1[2] != s2[2]: #these 2 cases should not happen - return False - - mult_min = lcm(s1[1], s2[1]) - diff = mult_min // min(s1[1], s2[1]) - if diff > 4: #arbitrary limit on the padding because it's probably going to impact performance negatively if it's too much - return False - return True - - def concat(self, others): - conds = [self.cond] - crossattn_max_len = self.cond.shape[1] - for x in others: - c = x.cond - crossattn_max_len = lcm(crossattn_max_len, c.shape[1]) - conds.append(c) - - out = [] - for c in conds: - if c.shape[1] < crossattn_max_len: - c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result - out.append(c) - return torch.cat(out) +import comfy.conds #The main sampling function shared by all the samplers #Returns predicted noise def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): - def get_area_and_mult(cond, x_in, timestep_in): + def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) strength = 1.0 - if 'timestep_start' in cond[1]: - timestep_start = cond[1]['timestep_start'] + + if 'timestep_start' in conds: + timestep_start = conds['timestep_start'] if timestep_in[0] > timestep_start: return None - if 'timestep_end' in cond[1]: - timestep_end = cond[1]['timestep_end'] + if 'timestep_end' in conds: + timestep_end = conds['timestep_end'] if timestep_in[0] < timestep_end: return None - if 'area' in cond[1]: - area = cond[1]['area'] - if 'strength' in cond[1]: - strength = cond[1]['strength'] - - adm_cond = None - if 'adm_encoded' in cond[1]: - adm_cond = cond[1]['adm_encoded'] + if 'area' in conds: + area = conds['area'] + if 'strength' in conds: + strength = conds['strength'] input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - if 'mask' in cond[1]: + if 'mask' in conds: # Scale the mask to the size of the input # The mask should have been resized as we began the sampling process mask_strength = 1.0 - if "mask_strength" in cond[1]: - mask_strength = cond[1]["mask_strength"] - mask = cond[1]['mask'] + if 
"mask_strength" in conds: + mask_strength = conds["mask_strength"] + mask = conds['mask'] assert(mask.shape[1] == x_in.shape[2]) assert(mask.shape[2] == x_in.shape[3]) mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength @@ -100,7 +48,7 @@ def get_area_and_mult(cond, x_in, timestep_in): mask = torch.ones_like(input_x) mult = mask * strength - if 'mask' not in cond[1]: + if 'mask' not in conds: rr = 8 if area[2] != 0: for t in range(rr): @@ -116,27 +64,17 @@ def get_area_and_mult(cond, x_in, timestep_in): mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) conditionning = {} - conditionning['c_crossattn'] = CONDCrossAttn(cond[0]) - - if 'concat' in cond[1]: - cond_concat_in = cond[1]['concat'] - if cond_concat_in is not None and len(cond_concat_in) > 0: - cropped = [] - for x in cond_concat_in: - cr = x[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - cropped.append(cr) - conditionning['c_concat'] = CONDRegular(torch.cat(cropped, dim=1)) - - if adm_cond is not None: - conditionning['c_adm'] = CONDRegular(adm_cond) + model_conds = conds["model_conds"] + for c in model_conds: + conditionning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) control = None - if 'control' in cond[1]: - control = cond[1]['control'] + if 'control' in conds: + control = conds['control'] patches = None - if 'gligen' in cond[1]: - gligen = cond[1]['gligen'] + if 'gligen' in conds: + gligen = conds['gligen'] patches = {} gligen_type = gligen[0] gligen_model = gligen[1] @@ -412,19 +350,19 @@ def resolve_areas_and_cond_masks(conditions, h, w, device): # While we're doing this, we can also resolve the mask device and scaling for performance reasons for i in range(len(conditions)): c = conditions[i] - if 'area' in c[1]: - area = c[1]['area'] + if 'area' in c: + area = c['area'] if area[0] == "percentage": - modified = c[1].copy() + modified = c.copy() area = (max(1, round(area[1] * h)), max(1, round(area[2] * w)), round(area[3] * h), round(area[4] * w)) modified['area'] = area - c = [c[0], modified] + c = modified conditions[i] = c - if 'mask' in c[1]: - mask = c[1]['mask'] + if 'mask' in c: + mask = c['mask'] mask = mask.to(device=device) - modified = c[1].copy() + modified = c.copy() if len(mask.shape) == 2: mask = mask.unsqueeze(0) if mask.shape[1] != h or mask.shape[2] != w: @@ -445,37 +383,39 @@ def resolve_areas_and_cond_masks(conditions, h, w, device): modified['area'] = area modified['mask'] = mask - conditions[i] = [c[0], modified] + conditions[i] = modified def create_cond_with_same_area_if_none(conds, c): - if 'area' not in c[1]: + if 'area' not in c: return - c_area = c[1]['area'] + c_area = c['area'] smallest = None for x in conds: - if 'area' in x[1]: - a = x[1]['area'] + if 'area' in x: + a = x['area'] if c_area[2] >= a[2] and c_area[3] >= a[3]: if a[0] + a[2] >= c_area[0] + c_area[2]: if a[1] + a[3] >= c_area[1] + c_area[3]: if smallest is None: smallest = x - elif 'area' not in smallest[1]: + elif 'area' not in smallest: smallest = x else: - if smallest[1]['area'][0] * smallest[1]['area'][1] > a[0] * a[1]: + if smallest['area'][0] * smallest['area'][1] > a[0] * a[1]: smallest = x else: if smallest is None: smallest = x if smallest is None: return - if 'area' in smallest[1]: - if smallest[1]['area'] == c_area: + if 'area' in smallest: + if smallest['area'] == c_area: return - n = c[1].copy() - conds += [[smallest[0], n]] + + out = c.copy() + out['model_conds'] = smallest['model_conds'].copy() #TODO: which fields 
should be copied? + conds += [out] def calculate_start_end_timesteps(model, conds): for t in range(len(conds)): @@ -483,18 +423,18 @@ def calculate_start_end_timesteps(model, conds): timestep_start = None timestep_end = None - if 'start_percent' in x[1]: - timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['start_percent'] * 999.0))) - if 'end_percent' in x[1]: - timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['end_percent'] * 999.0))) + if 'start_percent' in x: + timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['start_percent'] * 999.0))) + if 'end_percent' in x: + timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['end_percent'] * 999.0))) if (timestep_start is not None) or (timestep_end is not None): - n = x[1].copy() + n = x.copy() if (timestep_start is not None): n['timestep_start'] = timestep_start if (timestep_end is not None): n['timestep_end'] = timestep_end - conds[t] = [x[0], n] + conds[t] = n def pre_run_control(model, conds): for t in range(len(conds)): @@ -503,8 +443,8 @@ def pre_run_control(model, conds): timestep_start = None timestep_end = None percent_to_timestep_function = lambda a: model.sigma_to_t(model.t_to_sigma(torch.tensor(a) * 999.0)) - if 'control' in x[1]: - x[1]['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function) + if 'control' in x: + x['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function) def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): cond_cnets = [] @@ -513,16 +453,16 @@ def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): uncond_other = [] for t in range(len(conds)): x = conds[t] - if 'area' not in x[1]: - if name in x[1] and x[1][name] is not None: - cond_cnets.append(x[1][name]) + if 'area' not in x: + if name in x and x[name] is not None: + cond_cnets.append(x[name]) else: cond_other.append((x, t)) for t in range(len(uncond)): x = uncond[t] - if 'area' not in x[1]: - if name in x[1] and x[1][name] is not None: - uncond_cnets.append(x[1][name]) + if 'area' not in x: + if name in x and x[name] is not None: + uncond_cnets.append(x[name]) else: uncond_other.append((x, t)) @@ -532,47 +472,35 @@ def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): for x in range(len(cond_cnets)): temp = uncond_other[x % len(uncond_other)] o = temp[0] - if name in o[1] and o[1][name] is not None: - n = o[1].copy() + if name in o and o[name] is not None: + n = o.copy() n[name] = uncond_fill_func(cond_cnets, x) - uncond += [[o[0], n]] + uncond += [n] else: - n = o[1].copy() + n = o.copy() n[name] = uncond_fill_func(cond_cnets, x) - uncond[temp[1]] = [o[0], n] - -def encode_adm(model, conds, batch_size, width, height, device, prompt_type): - for t in range(len(conds)): - x = conds[t] - adm_out = None - if 'adm' in x[1]: - adm_out = x[1]["adm"] - else: - params = x[1].copy() - params["width"] = params.get("width", width * 8) - params["height"] = params.get("height", height * 8) - params["prompt_type"] = params.get("prompt_type", prompt_type) - adm_out = model.encode_adm(device=device, **params) - - if adm_out is not None: - x[1] = x[1].copy() - x[1]["adm_encoded"] = comfy.utils.repeat_to_batch_size(adm_out, batch_size).to(device) + uncond[temp[1]] = n - return conds - -def encode_cond(model_function, key, conds, device, **kwargs): +def encode_model_conds(model_function, conds, noise, device, prompt_type, **kwargs): for t in range(len(conds)): x = conds[t] - params = x[1].copy() + params = x.copy() 
params["device"] = device + params["noise"] = noise + params["width"] = params.get("width", noise.shape[3] * 8) + params["height"] = params.get("height", noise.shape[2] * 8) + params["prompt_type"] = params.get("prompt_type", prompt_type) for k in kwargs: if k not in params: params[k] = kwargs[k] out = model_function(**params) - if out is not None: - x[1] = x[1].copy() - x[1][key] = out + x = x.copy() + model_conds = x['model_conds'].copy() + for k in out: + model_conds[k] = out[k] + x['model_conds'] = model_conds + conds[t] = x return conds class Sampler: @@ -690,19 +618,15 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model pre_run_control(model_wrap, negative + positive) - apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) + apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) if latent_image is not None: latent_image = model.process_latent_in(latent_image) - if model.is_adm(): - positive = encode_adm(model, positive, noise.shape[0], noise.shape[3], noise.shape[2], device, "positive") - negative = encode_adm(model, negative, noise.shape[0], noise.shape[3], noise.shape[2], device, "negative") - - if hasattr(model, 'cond_concat'): - positive = encode_cond(model.cond_concat, "concat", positive, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) - negative = encode_cond(model.cond_concat, "concat", negative, device, noise=noise, latent_image=latent_image, denoise_mask=denoise_mask) + if hasattr(model, 'extra_conds'): + positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask) + negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask) extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} From d1d2fea806c07b1519634ec6dbc8c7f60dee8f4e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 25 Oct 2023 00:07:53 -0400 Subject: [PATCH 128/420] Pass extra conds directly to unet. 
--- comfy/model_base.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index edc246f8c94..ea3ea61f213 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -50,7 +50,7 @@ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps= self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control=None, transformer_options={}, **kwargs): + def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs): if c_concat is not None: xc = torch.cat([x] + [c_concat], dim=1) else: @@ -60,9 +60,10 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, c_adm=None, control xc = xc.to(dtype) t = t.to(dtype) context = context.to(dtype) - if c_adm is not None: - c_adm = c_adm.to(dtype) - return self.diffusion_model(xc, t, context=context, y=c_adm, control=control, transformer_options=transformer_options).float() + extra_conds = {} + for o in kwargs: + extra_conds[o] = kwargs[o].to(dtype) + return self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() def get_dtype(self): return self.diffusion_model.dtype @@ -107,7 +108,7 @@ def blank_inpaint_image_like(latent_image): out['c_concat'] = comfy.conds.CONDNoiseShape(data) adm = self.encode_adm(**kwargs) if adm is not None: - out['c_adm'] = comfy.conds.CONDRegular(adm) + out['y'] = comfy.conds.CONDRegular(adm) return out def load_model_weights(self, sd, unet_prefix=""): From 3783cb8bfd4bc0a688a565319257931f4737a958 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Wed, 25 Oct 2023 08:24:32 -0500 Subject: [PATCH 129/420] change 'c_adm' to 'y' in ControlNet.get_control --- comfy/controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index f1355e64e9d..2a88dd01924 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -156,7 +156,7 @@ def get_control(self, x_noisy, t, cond, batched_number): context = cond['c_crossattn'] - y = cond.get('c_adm', None) + y = cond.get('y', None) if y is not None: y = y.to(self.control_model.dtype) control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=t, context=context.to(self.control_model.dtype), y=y) From 7fbb217d3a46fc117dd78b90191e528139fb851a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 25 Oct 2023 16:08:30 -0400 Subject: [PATCH 130/420] Fix uni_pc returning noisy image when steps <= 3 --- comfy/extra_samplers/uni_pc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 58e030d0439..9d5f0c60bdc 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -881,7 +881,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex model_kwargs=extra_args, ) - order = min(3, len(timesteps) - 1) + order = min(3, len(timesteps) - 2) uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, noise_mask=noise_mask, masked_image=image, noise=noise, variant=variant) x = uni_pc.sample(img, timesteps=timesteps, skip_type="time_uniform", method="multistep", order=order, lower_order_final=True, callback=callback, disable_pbar=disable) x /= 
ns.marginal_alpha(timesteps[-1]) From a373367b0cf37e9c67b30d21c207417dedfffd4f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 25 Oct 2023 20:17:28 -0400 Subject: [PATCH 131/420] Fix some OOM issues with split and sub quad attention. --- comfy/ldm/modules/attention.py | 9 +++++++-- comfy/ldm/modules/sub_quadratic_attention.py | 3 ++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index dcf467489fe..4f10bbc3529 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -222,9 +222,14 @@ def attention_split(q, k, v, heads, mask=None): mem_free_total = model_management.get_free_memory(q.device) + if _ATTN_PRECISION =="fp32": + element_size = 4 + else: + element_size = q.element_size() + gb = 1024 ** 3 - tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() - modifier = 3 if q.element_size() == 2 else 2.5 + tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size + modifier = 3 if element_size == 2 else 2.5 mem_required = tensor_size * modifier steps = 1 diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py index 4d42059b5a8..8e8e8054dfd 100644 --- a/comfy/ldm/modules/sub_quadratic_attention.py +++ b/comfy/ldm/modules/sub_quadratic_attention.py @@ -83,7 +83,8 @@ def _summarize_chunk( ) max_score, _ = torch.max(attn_weights, -1, keepdim=True) max_score = max_score.detach() - torch.exp(attn_weights - max_score, out=attn_weights) + attn_weights -= max_score + torch.exp(attn_weights, out=attn_weights) exp_weights = attn_weights.to(value.dtype) exp_values = torch.bmm(exp_weights, value) max_score = max_score.squeeze(-1) From 723847f6b3d5da21e5d712bc0139fb7197ba60a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 26 Oct 2023 01:53:01 -0400 Subject: [PATCH 132/420] Faster clip image processing. --- comfy/clip_vision.py | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index e085186ef68..9e2e03d7238 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -1,5 +1,5 @@ -from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor, modeling_utils -from .utils import load_torch_file, transformers_convert +from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, modeling_utils +from .utils import load_torch_file, transformers_convert, common_upscale import os import torch import contextlib @@ -7,6 +7,18 @@ import comfy.ops import comfy.model_patcher import comfy.model_management +import comfy.utils + +def clip_preprocess(image, size=224): + mean = torch.tensor([ 0.48145466,0.4578275,0.40821073], device=image.device, dtype=image.dtype) + std = torch.tensor([0.26862954,0.26130258,0.27577711], device=image.device, dtype=image.dtype) + scale = (size / min(image.shape[1], image.shape[2])) + image = torch.nn.functional.interpolate(image.movedim(-1, 1), size=(round(scale * image.shape[1]), round(scale * image.shape[2])), mode="bicubic", antialias=True) + h = (image.shape[2] - size)//2 + w = (image.shape[3] - size)//2 + image = image[:,:,h:h+size,w:w+size] + image = torch.clip((255. 
* image), 0, 255).round() / 255.0 + return (image - mean.view([3,1,1])) / std.view([3,1,1]) class ClipVisionModel(): def __init__(self, json_config): @@ -23,25 +35,12 @@ def __init__(self, json_config): self.model.to(self.dtype) self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) - self.processor = CLIPImageProcessor(crop_size=224, - do_center_crop=True, - do_convert_rgb=True, - do_normalize=True, - do_resize=True, - image_mean=[ 0.48145466,0.4578275,0.40821073], - image_std=[0.26862954,0.26130258,0.27577711], - resample=3, #bicubic - size=224) - def load_sd(self, sd): return self.model.load_state_dict(sd, strict=False) def encode_image(self, image): - img = torch.clip((255. * image), 0, 255).round().int() - img = list(map(lambda a: a, img)) - inputs = self.processor(images=img, return_tensors="pt") comfy.model_management.load_model_gpu(self.patcher) - pixel_values = inputs['pixel_values'].to(self.load_device) + pixel_values = clip_preprocess(image.to(self.load_device)) if self.dtype != torch.float32: precision_scope = torch.autocast From 40963b5a16f717a636c98dae0055224938852c6a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 26 Oct 2023 19:52:41 -0400 Subject: [PATCH 133/420] Apply primitive nodes to graph before serializing workflow. --- web/scripts/app.js | 14 ++++++++++---- web/scripts/ui.js | 28 +++++++++++++++------------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index fca5b5bd31e..583310a27c7 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1586,6 +1586,16 @@ export class ComfyApp { * @returns The workflow and node links */ async graphToPrompt() { + for (const node of this.graph.computeExecutionOrder(false)) { + if (node.isVirtualNode) { + // Don't serialize frontend only nodes but let them make changes + if (node.applyToGraph) { + node.applyToGraph(); + } + continue; + } + } + const workflow = this.graph.serialize(); const output = {}; // Process nodes in order of execution @@ -1593,10 +1603,6 @@ export class ComfyApp { const n = workflow.nodes.find((n) => n.id === node.id); if (node.isVirtualNode) { - // Don't serialize frontend only nodes but let them make changes - if (node.applyToGraph) { - node.applyToGraph(workflow); - } continue; } diff --git a/web/scripts/ui.js b/web/scripts/ui.js index c3b3fbda114..6f01aa5b245 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -719,20 +719,22 @@ export class ComfyUI { filename += ".json"; } } - const json = JSON.stringify(app.graph.serialize(), null, 2); // convert the data to a JSON string - const blob = new Blob([json], {type: "application/json"}); - const url = URL.createObjectURL(blob); - const a = $el("a", { - href: url, - download: filename, - style: {display: "none"}, - parent: document.body, + app.graphToPrompt().then(p=>{ + const json = JSON.stringify(p.workflow, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: filename, + style: {display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); }); - a.click(); - setTimeout(function () { - a.remove(); - window.URL.revokeObjectURL(url); - }, 0); }, }), $el("button", { From 434ce25ec00719ec67372482af2f0e6e517d548a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 02:42:14 -0400 
Subject: [PATCH 134/420] Restrict loading embeddings from embedding folders. --- comfy/sd1_clip.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 9978b6c35c6..ffe2bd3bd14 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -278,7 +278,13 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No valid_file = None for embed_dir in embedding_directory: - embed_path = os.path.join(embed_dir, embedding_name) + embed_path = os.path.abspath(os.path.join(embed_dir, embedding_name)) + embed_dir = os.path.abspath(embed_dir) + try: + if os.path.commonpath((embed_dir, embed_path)) != embed_dir: + continue + except: + continue if not os.path.isfile(embed_path): extensions = ['.safetensors', '.pt', '.bin'] for x in extensions: From 6ec3f12c6e2e1d214c41f5713308818541da52a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 14:15:45 -0400 Subject: [PATCH 135/420] Support SSD1B model and make it easier to support asymmetric unets. --- comfy/cldm/cldm.py | 47 ++++---- .../modules/diffusionmodules/openaimodel.py | 42 +++---- comfy/model_detection.py | 112 ++++++++++++++---- comfy/sd.py | 2 +- comfy/supported_models.py | 16 ++- comfy/utils.py | 32 ++--- 6 files changed, 154 insertions(+), 97 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index f982d648ce4..9a63202ab07 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -27,7 +27,6 @@ def __init__( model_channels, hint_channels, num_res_blocks, - attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, @@ -52,6 +51,7 @@ def __init__( use_linear_in_transformer=False, adm_in_channels=None, transformer_depth_middle=None, + transformer_depth_output=None, device=None, operations=comfy.ops, ): @@ -79,10 +79,7 @@ def __init__( self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels - if isinstance(transformer_depth, int): - transformer_depth = len(channel_mult) * [transformer_depth] - if transformer_depth_middle is None: - transformer_depth_middle = transformer_depth[-1] + if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: @@ -90,18 +87,16 @@ def __init__( raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") - self.attention_resolutions = attention_resolutions + transformer_depth = transformer_depth[:] + self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample @@ -180,11 +175,14 @@ def __init__( dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, - operations=operations + dtype=self.dtype, + device=device, + operations=operations, ) ] ch = mult * model_channels - if ds in attention_resolutions: + num_transformers = transformer_depth.pop(0) + if num_transformers > 0: if num_head_channels == -1: dim_head = ch // num_heads else: @@ -201,9 +199,9 @@ def __init__( if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, + ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, operations=operations + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) @@ -223,11 +221,13 @@ def __init__( use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, + dtype=self.dtype, + device=device, operations=operations ) if resblock_updown else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch, operations=operations + ch, conv_resample, dims=dims, out_channels=out_ch, dtype=self.dtype, device=device, operations=operations ) ) ) @@ -245,7 +245,7 @@ def __init__( if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( + mid_block = [ ResBlock( ch, time_embed_dim, @@ -253,12 +253,15 @@ def __init__( dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, + dtype=self.dtype, + device=device, operations=operations - ), - SpatialTransformer( # always uses a self-attn + )] + if transformer_depth_middle >= 0: + mid_block += [SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, operations=operations + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ), ResBlock( ch, @@ -267,9 +270,11 @@ def __init__( dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, + dtype=self.dtype, + device=device, operations=operations - ), - ) + )] + self.middle_block = TimestepEmbedSequential(*mid_block) self.middle_block_out = self.make_zero_conv(ch, operations=operations) self._feature_size += ch diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index bf58a4045f7..7dfdfc0a29c 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -259,10 +259,6 @@ class UNetModel(nn.Module): :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. 
- :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and @@ -289,7 +285,6 @@ def __init__( model_channels, out_channels, num_res_blocks, - attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, @@ -314,6 +309,7 @@ def __init__( use_linear_in_transformer=False, adm_in_channels=None, transformer_depth_middle=None, + transformer_depth_output=None, device=None, operations=comfy.ops, ): @@ -341,10 +337,7 @@ def __init__( self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels - if isinstance(transformer_depth, int): - transformer_depth = len(channel_mult) * [transformer_depth] - if transformer_depth_middle is None: - transformer_depth_middle = transformer_depth[-1] + if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: @@ -352,18 +345,16 @@ def __init__( raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks + if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) - assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) - print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" - f"This option has LESS priority than attention_resolutions {attention_resolutions}, " - f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " - f"attention will still not be set.") - self.attention_resolutions = attention_resolutions + transformer_depth = transformer_depth[:] + transformer_depth_output = transformer_depth_output[:] + self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample @@ -428,7 +419,8 @@ def __init__( ) ] ch = mult * model_channels - if ds in attention_resolutions: + num_transformers = transformer_depth.pop(0) + if num_transformers > 0: if num_head_channels == -1: dim_head = ch // num_heads else: @@ -444,7 +436,7 @@ def __init__( if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append(SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, + ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) @@ -488,7 +480,7 @@ def __init__( if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( + mid_block = [ ResBlock( ch, time_embed_dim, @@ -499,8 +491,9 @@ def __init__( dtype=self.dtype, device=device, operations=operations - ), - SpatialTransformer( # always uses a self-attn + )] + if transformer_depth_middle >= 0: + mid_block += [SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth_middle, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations @@ -515,8 +508,8 @@ def __init__( dtype=self.dtype, device=device, operations=operations - ), - ) + )] + self.middle_block = TimestepEmbedSequential(*mid_block) self._feature_size += ch self.output_blocks = nn.ModuleList([]) @@ -538,7 +531,8 @@ def __init__( ) ] ch = model_channels * mult - if ds in attention_resolutions: + num_transformers = transformer_depth_output.pop() + if num_transformers > 0: if num_head_channels == -1: dim_head = ch // num_heads else: @@ -555,7 +549,7 @@ def __init__( if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth[level], context_dim=context_dim, + ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations ) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 0ff2e7fb53f..4f4e0b3b7f0 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -14,6 +14,19 @@ def count_blocks(state_dict_keys, prefix_string): count += 1 return count +def calculate_transformer_depth(prefix, state_dict_keys, state_dict): + context_dim = None + use_linear_in_transformer = False + + transformer_prefix = prefix + "1.transformer_blocks." 
+ transformer_keys = sorted(list(filter(lambda a: a.startswith(transformer_prefix), state_dict_keys))) + if len(transformer_keys) > 0: + last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}') + context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1] + use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2 + return last_transformer_depth, context_dim, use_linear_in_transformer + return None + def detect_unet_config(state_dict, key_prefix, dtype): state_dict_keys = list(state_dict.keys()) @@ -40,6 +53,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): channel_mult = [] attention_resolutions = [] transformer_depth = [] + transformer_depth_output = [] context_dim = None use_linear_in_transformer = False @@ -48,60 +62,67 @@ def detect_unet_config(state_dict, key_prefix, dtype): count = 0 last_res_blocks = 0 - last_transformer_depth = 0 last_channel_mult = 0 - while True: + input_block_count = count_blocks(state_dict_keys, '{}input_blocks'.format(key_prefix) + '.{}.') + for count in range(input_block_count): prefix = '{}input_blocks.{}.'.format(key_prefix, count) + prefix_output = '{}output_blocks.{}.'.format(key_prefix, input_block_count - count - 1) + block_keys = sorted(list(filter(lambda a: a.startswith(prefix), state_dict_keys))) if len(block_keys) == 0: break + block_keys_output = sorted(list(filter(lambda a: a.startswith(prefix_output), state_dict_keys))) + if "{}0.op.weight".format(prefix) in block_keys: #new layer - if last_transformer_depth > 0: - attention_resolutions.append(current_res) - transformer_depth.append(last_transformer_depth) num_res_blocks.append(last_res_blocks) channel_mult.append(last_channel_mult) current_res *= 2 last_res_blocks = 0 - last_transformer_depth = 0 last_channel_mult = 0 + out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict) + if out is not None: + transformer_depth_output.append(out[0]) + else: + transformer_depth_output.append(0) else: res_block_prefix = "{}0.in_layers.0.weight".format(prefix) if res_block_prefix in block_keys: last_res_blocks += 1 last_channel_mult = state_dict["{}0.out_layers.3.weight".format(prefix)].shape[0] // model_channels - transformer_prefix = prefix + "1.transformer_blocks." 
- transformer_keys = sorted(list(filter(lambda a: a.startswith(transformer_prefix), state_dict_keys))) - if len(transformer_keys) > 0: - last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}') - if context_dim is None: - context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1] - use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2 + out = calculate_transformer_depth(prefix, state_dict_keys, state_dict) + if out is not None: + transformer_depth.append(out[0]) + if context_dim is None: + context_dim = out[1] + use_linear_in_transformer = out[2] + else: + transformer_depth.append(0) + + res_block_prefix = "{}0.in_layers.0.weight".format(prefix_output) + if res_block_prefix in block_keys_output: + out = calculate_transformer_depth(prefix_output, state_dict_keys, state_dict) + if out is not None: + transformer_depth_output.append(out[0]) + else: + transformer_depth_output.append(0) - count += 1 - if last_transformer_depth > 0: - attention_resolutions.append(current_res) - transformer_depth.append(last_transformer_depth) num_res_blocks.append(last_res_blocks) channel_mult.append(last_channel_mult) - transformer_depth_middle = count_blocks(state_dict_keys, '{}middle_block.1.transformer_blocks.'.format(key_prefix) + '{}') - - if len(set(num_res_blocks)) == 1: - num_res_blocks = num_res_blocks[0] - - if len(set(transformer_depth)) == 1: - transformer_depth = transformer_depth[0] + if "{}middle_block.1.proj_in.weight".format(key_prefix) in state_dict_keys: + transformer_depth_middle = count_blocks(state_dict_keys, '{}middle_block.1.transformer_blocks.'.format(key_prefix) + '{}') + else: + transformer_depth_middle = -1 unet_config["in_channels"] = in_channels unet_config["model_channels"] = model_channels unet_config["num_res_blocks"] = num_res_blocks - unet_config["attention_resolutions"] = attention_resolutions unet_config["transformer_depth"] = transformer_depth + unet_config["transformer_depth_output"] = transformer_depth_output unet_config["channel_mult"] = channel_mult unet_config["transformer_depth_middle"] = transformer_depth_middle unet_config['use_linear_in_transformer'] = use_linear_in_transformer @@ -124,6 +145,45 @@ def model_config_from_unet(state_dict, unet_key_prefix, dtype, use_base_if_no_ma else: return model_config +def convert_config(unet_config): + new_config = unet_config.copy() + num_res_blocks = new_config.get("num_res_blocks", None) + channel_mult = new_config.get("channel_mult", None) + + if isinstance(num_res_blocks, int): + num_res_blocks = len(channel_mult) * [num_res_blocks] + + if "attention_resolutions" in new_config: + attention_resolutions = new_config.pop("attention_resolutions") + transformer_depth = new_config.get("transformer_depth", None) + transformer_depth_middle = new_config.get("transformer_depth_middle", None) + + if isinstance(transformer_depth, int): + transformer_depth = len(channel_mult) * [transformer_depth] + if transformer_depth_middle is None: + transformer_depth_middle = transformer_depth[-1] + t_in = [] + t_out = [] + s = 1 + for i in range(len(num_res_blocks)): + res = num_res_blocks[i] + d = 0 + if s in attention_resolutions: + d = transformer_depth[i] + + t_in += [d] * res + t_out += [d] * (res + 1) + s *= 2 + transformer_depth = t_in + transformer_depth_output = t_out + new_config["transformer_depth"] = t_in + new_config["transformer_depth_output"] = t_out + new_config["transformer_depth_middle"] = transformer_depth_middle + + 
new_config["num_res_blocks"] = num_res_blocks + return new_config + + def unet_config_from_diffusers_unet(state_dict, dtype): match = {} attention_resolutions = [] @@ -200,7 +260,7 @@ def unet_config_from_diffusers_unet(state_dict, dtype): matches = False break if matches: - return unet_config + return convert_config(unet_config) return None def model_config_from_diffusers_unet(state_dict, dtype): diff --git a/comfy/sd.py b/comfy/sd.py index c364b723cb9..aea55bbdf8f 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -360,7 +360,7 @@ class EmptyClass: from . import latent_formats model_config.latent_format = latent_formats.SD15(scale_factor=scale_factor) - model_config.unet_config = unet_config + model_config.unet_config = model_detection.convert_config(unet_config) if config['model']["target"].endswith("ImageEmbeddingConditionedLatentDiffusion"): model = model_base.SD21UNCLIP(model_config, noise_aug_config["params"], model_type=model_type) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index bb8ae2148fd..820f2861cee 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -104,7 +104,7 @@ class SDXLRefiner(supported_models_base.BASE): "use_linear_in_transformer": True, "context_dim": 1280, "adm_in_channels": 2560, - "transformer_depth": [0, 4, 4, 0], + "transformer_depth": [0, 0, 4, 4, 4, 4, 0, 0], } latent_format = latent_formats.SDXL @@ -139,7 +139,7 @@ class SDXL(supported_models_base.BASE): unet_config = { "model_channels": 320, "use_linear_in_transformer": True, - "transformer_depth": [0, 2, 10], + "transformer_depth": [0, 0, 2, 2, 10, 10], "context_dim": 2048, "adm_in_channels": 2816 } @@ -165,6 +165,7 @@ def process_clip_state_dict(self, state_dict): replace_prefix["conditioner.embedders.0.transformer.text_model"] = "cond_stage_model.clip_l.transformer.text_model" state_dict = utils.transformers_convert(state_dict, "conditioner.embedders.1.model.", "cond_stage_model.clip_g.transformer.text_model.", 32) keys_to_replace["conditioner.embedders.1.model.text_projection"] = "cond_stage_model.clip_g.text_projection" + keys_to_replace["conditioner.embedders.1.model.text_projection.weight"] = "cond_stage_model.clip_g.text_projection" keys_to_replace["conditioner.embedders.1.model.logit_scale"] = "cond_stage_model.clip_g.logit_scale" state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) @@ -189,5 +190,14 @@ def process_clip_state_dict_for_saving(self, state_dict): def clip_target(self): return supported_models_base.ClipTarget(sdxl_clip.SDXLTokenizer, sdxl_clip.SDXLClipModel) +class SSD1B(SDXL): + unet_config = { + "model_channels": 320, + "use_linear_in_transformer": True, + "transformer_depth": [0, 0, 2, 2, 4, 4], + "context_dim": 2048, + "adm_in_channels": 2816 + } + -models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL] +models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B] diff --git a/comfy/utils.py b/comfy/utils.py index a1807aa1d47..6a0c54e8098 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -170,25 +170,12 @@ def transformers_convert(sd, prefix_from, prefix_to, number): def unet_to_diffusers(unet_config): num_res_blocks = unet_config["num_res_blocks"] - attention_resolutions = unet_config["attention_resolutions"] channel_mult = unet_config["channel_mult"] - transformer_depth = unet_config["transformer_depth"] + transformer_depth = unet_config["transformer_depth"][:] + transformer_depth_output = unet_config["transformer_depth_output"][:] num_blocks = len(channel_mult) - if isinstance(num_res_blocks, 
int): - num_res_blocks = [num_res_blocks] * num_blocks - if isinstance(transformer_depth, int): - transformer_depth = [transformer_depth] * num_blocks - - transformers_per_layer = [] - res = 1 - for i in range(num_blocks): - transformers = 0 - if res in attention_resolutions: - transformers = transformer_depth[i] - transformers_per_layer.append(transformers) - res *= 2 - - transformers_mid = unet_config.get("transformer_depth_middle", transformer_depth[-1]) + + transformers_mid = unet_config.get("transformer_depth_middle", None) diffusers_unet_map = {} for x in range(num_blocks): @@ -196,10 +183,11 @@ def unet_to_diffusers(unet_config): for i in range(num_res_blocks[x]): for b in UNET_MAP_RESNET: diffusers_unet_map["down_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "input_blocks.{}.0.{}".format(n, b) - if transformers_per_layer[x] > 0: + num_transformers = transformer_depth.pop(0) + if num_transformers > 0: for b in UNET_MAP_ATTENTIONS: diffusers_unet_map["down_blocks.{}.attentions.{}.{}".format(x, i, b)] = "input_blocks.{}.1.{}".format(n, b) - for t in range(transformers_per_layer[x]): + for t in range(num_transformers): for b in TRANSFORMER_BLOCKS: diffusers_unet_map["down_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "input_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b) n += 1 @@ -218,7 +206,6 @@ def unet_to_diffusers(unet_config): diffusers_unet_map["mid_block.resnets.{}.{}".format(i, UNET_MAP_RESNET[b])] = "middle_block.{}.{}".format(n, b) num_res_blocks = list(reversed(num_res_blocks)) - transformers_per_layer = list(reversed(transformers_per_layer)) for x in range(num_blocks): n = (num_res_blocks[x] + 1) * x l = num_res_blocks[x] + 1 @@ -227,11 +214,12 @@ def unet_to_diffusers(unet_config): for b in UNET_MAP_RESNET: diffusers_unet_map["up_blocks.{}.resnets.{}.{}".format(x, i, UNET_MAP_RESNET[b])] = "output_blocks.{}.0.{}".format(n, b) c += 1 - if transformers_per_layer[x] > 0: + num_transformers = transformer_depth_output.pop() + if num_transformers > 0: c += 1 for b in UNET_MAP_ATTENTIONS: diffusers_unet_map["up_blocks.{}.attentions.{}.{}".format(x, i, b)] = "output_blocks.{}.1.{}".format(n, b) - for t in range(transformers_per_layer[x]): + for t in range(num_transformers): for b in TRANSFORMER_BLOCKS: diffusers_unet_map["up_blocks.{}.attentions.{}.transformer_blocks.{}.{}".format(x, i, t, b)] = "output_blocks.{}.1.transformer_blocks.{}.{}".format(n, t, b) if i == l - 1: From e60ca6929a999f53a4eeb62cc80f70b1cd7a0acf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 15:54:04 -0400 Subject: [PATCH 136/420] SD1 and SD2 clip and tokenizer code is now more similar to the SDXL one. 
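This refactor renames the concrete classes to SDClipModel/SDTokenizer and turns SD1ClipModel/SD1Tokenizer into thin wrappers that keep the real encoder under a dynamically named attribute (clip_l for SD1, clip_h for SD2, clip_g for the refiner). A minimal toy sketch of that indirection, with illustrative names rather than the real classes:

    import torch

    class ToyEncoder(torch.nn.Module):
        def encode_token_weights(self, pairs):
            return "out", "pooled"

    class ToyWrapper(torch.nn.Module):  # plays the role of SD1ClipModel
        def __init__(self, clip_name="l", clip_model=ToyEncoder):
            super().__init__()
            self.clip_name = clip_name
            self.clip = "clip_{}".format(clip_name)
            setattr(self, self.clip, clip_model())  # e.g. self.clip_l or self.clip_h

        def encode_token_weights(self, token_weight_pairs):
            # dispatch to the inner encoder, keyed by the clip name
            return getattr(self, self.clip).encode_token_weights(token_weight_pairs[self.clip_name])

    w = ToyWrapper(clip_name="h")              # SD2 style: encoder lives at w.clip_h
    print(w.encode_token_weights({"h": []}))   # ('out', 'pooled')

SD2ClipModel and SDXLRefinerClipModel then reduce to one-line subclasses that pick a clip name and an encoder class, as the hunks below show.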
--- comfy/lora.py | 6 ++++-- comfy/sd1_clip.py | 41 +++++++++++++++++++++++++++++++++++++-- comfy/sd2_clip.py | 12 ++++++++++-- comfy/sdxl_clip.py | 29 +++++++-------------------- comfy/supported_models.py | 11 +++++++++-- 5 files changed, 69 insertions(+), 30 deletions(-) diff --git a/comfy/lora.py b/comfy/lora.py index 3009a1c9e0c..d4cf94c9599 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -141,9 +141,9 @@ def model_lora_keys_clip(model, key_map={}): text_model_lora_key = "lora_te_text_model_encoder_layers_{}_{}" clip_l_present = False - for b in range(32): + for b in range(32): #TODO: clean up for c in LORA_CLIP_MAP: - k = "transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) + k = "clip_h.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) if k in sdk: lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c]) key_map[lora_key] = k @@ -154,6 +154,8 @@ def model_lora_keys_clip(model, key_map={}): k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) if k in sdk: + lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c]) + key_map[lora_key] = k lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) #SDXL base key_map[lora_key] = k clip_l_present = True diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index ffe2bd3bd14..5368a45dfdc 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -35,7 +35,7 @@ def encode_token_weights(self, token_weight_pairs): return z_empty.cpu(), first_pooled.cpu() return torch.cat(output, dim=-2).cpu(), first_pooled.cpu() -class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder): +class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): """Uses the CLIP transformer encoder for text (from huggingface)""" LAYERS = [ "last", @@ -342,7 +342,7 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No embed_out = next(iter(values)) return embed_out -class SD1Tokenizer: +class SDTokenizer: def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l'): if tokenizer_path is None: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") @@ -454,3 +454,40 @@ def tokenize_with_weights(self, text:str, return_word_ids=False): def untokenize(self, token_weight_pair): return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair)) + + +class SD1Tokenizer: + def __init__(self, embedding_directory=None, clip_name="l", tokenizer=SDTokenizer): + self.clip_name = clip_name + self.clip = "clip_{}".format(self.clip_name) + setattr(self, self.clip, tokenizer(embedding_directory=embedding_directory)) + + def tokenize_with_weights(self, text:str, return_word_ids=False): + out = {} + out[self.clip_name] = getattr(self, self.clip).tokenize_with_weights(text, return_word_ids) + return out + + def untokenize(self, token_weight_pair): + return getattr(self, self.clip).untokenize(token_weight_pair) + + +class SD1ClipModel(torch.nn.Module): + def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel): + super().__init__() + self.clip_name = clip_name + self.clip = "clip_{}".format(self.clip_name) + setattr(self, self.clip, clip_model(device=device, dtype=dtype)) + + def clip_layer(self, layer_idx): + getattr(self, self.clip).clip_layer(layer_idx) + + def reset_clip_layer(self): + getattr(self, self.clip).reset_clip_layer() + + def encode_token_weights(self, token_weight_pairs): + token_weight_pairs = 
token_weight_pairs[self.clip_name] + out, pooled = getattr(self, self.clip).encode_token_weights(token_weight_pairs) + return out, pooled + + def load_sd(self, sd): + return getattr(self, self.clip).load_sd(sd) diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index 05e50a0057b..9df868b762f 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -2,7 +2,7 @@ import torch import os -class SD2ClipModel(sd1_clip.SD1ClipModel): +class SD2ClipHModel(sd1_clip.SDClipModel): def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): if layer == "penultimate": layer="hidden" @@ -12,6 +12,14 @@ def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, la super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) self.empty_tokens = [[49406] + [49407] + [0] * 75] -class SD2Tokenizer(sd1_clip.SD1Tokenizer): +class SD2ClipHTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None): super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024) + +class SD2Tokenizer(sd1_clip.SD1Tokenizer): + def __init__(self, embedding_directory=None): + super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer) + +class SD2ClipModel(sd1_clip.SD1ClipModel): + def __init__(self, device="cpu", dtype=None): + super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel) diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index e3ac2ee0b4a..4c508a0ea88 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -2,7 +2,7 @@ import torch import os -class SDXLClipG(sd1_clip.SD1ClipModel): +class SDXLClipG(sd1_clip.SDClipModel): def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): if layer == "penultimate": layer="hidden" @@ -16,14 +16,14 @@ def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate" def load_sd(self, sd): return super().load_sd(sd) -class SDXLClipGTokenizer(sd1_clip.SD1Tokenizer): +class SDXLClipGTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None): super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1280, embedding_key='clip_g') -class SDXLTokenizer(sd1_clip.SD1Tokenizer): +class SDXLTokenizer: def __init__(self, embedding_directory=None): - self.clip_l = sd1_clip.SD1Tokenizer(embedding_directory=embedding_directory) + self.clip_l = sd1_clip.SDTokenizer(embedding_directory=embedding_directory) self.clip_g = SDXLClipGTokenizer(embedding_directory=embedding_directory) def tokenize_with_weights(self, text:str, return_word_ids=False): @@ -38,7 +38,7 @@ def untokenize(self, token_weight_pair): class SDXLClipModel(torch.nn.Module): def __init__(self, device="cpu", dtype=None): super().__init__() - self.clip_l = sd1_clip.SD1ClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype) + self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype) self.clip_l.layer_norm_hidden_state = False self.clip_g = SDXLClipG(device=device, dtype=dtype) @@ -63,21 +63,6 @@ def load_sd(self, sd): else: return self.clip_l.load_sd(sd) -class SDXLRefinerClipModel(torch.nn.Module): +class 
SDXLRefinerClipModel(sd1_clip.SD1ClipModel): def __init__(self, device="cpu", dtype=None): - super().__init__() - self.clip_g = SDXLClipG(device=device, dtype=dtype) - - def clip_layer(self, layer_idx): - self.clip_g.clip_layer(layer_idx) - - def reset_clip_layer(self): - self.clip_g.reset_clip_layer() - - def encode_token_weights(self, token_weight_pairs): - token_weight_pairs_g = token_weight_pairs["g"] - g_out, g_pooled = self.clip_g.encode_token_weights(token_weight_pairs_g) - return g_out, g_pooled - - def load_sd(self, sd): - return self.clip_g.load_sd(sd) + super().__init__(device=device, dtype=dtype, clip_name="g", clip_model=SDXLClipG) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 820f2861cee..fdd4ea4f5c2 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -38,8 +38,15 @@ def process_clip_state_dict(self, state_dict): if ids.dtype == torch.float32: state_dict['cond_stage_model.transformer.text_model.embeddings.position_ids'] = ids.round() + replace_prefix = {} + replace_prefix["cond_stage_model."] = "cond_stage_model.clip_l." + state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) return state_dict + def process_clip_state_dict_for_saving(self, state_dict): + replace_prefix = {"clip_l.": "cond_stage_model."} + return utils.state_dict_prefix_replace(state_dict, replace_prefix) + def clip_target(self): return supported_models_base.ClipTarget(sd1_clip.SD1Tokenizer, sd1_clip.SD1ClipModel) @@ -62,12 +69,12 @@ def model_type(self, state_dict, prefix=""): return model_base.ModelType.EPS def process_clip_state_dict(self, state_dict): - state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.transformer.text_model.", 24) + state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.clip_h.transformer.text_model.", 24) return state_dict def process_clip_state_dict_for_saving(self, state_dict): replace_prefix = {} - replace_prefix[""] = "cond_stage_model.model." + replace_prefix["clip_h"] = "cond_stage_model.model" state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) state_dict = diffusers_convert.convert_text_enc_state_dict_v20(state_dict) return state_dict From 2a134bfab9788b6a0a70aea3172d8e3fc904b414 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 27 Oct 2023 22:13:55 -0400 Subject: [PATCH 137/420] Fix checkpoint loader with config. 
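Follow-up to the wrapper refactor above: the yaml-config checkpoint path still handed the wrapper itself to the clip weight loader, but the module the checkpoint keys describe now sits one attribute deeper. Roughly, paraphrasing the hunk below:

    # clip.cond_stage_model is now a wrapper; the encoder the checkpoint
    # keys refer to is nested one level down, e.g. for SD2:
    w.cond_stage_model = clip.cond_stage_model.clip_h   # clip_l for SD1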
--- comfy/sd.py | 6 ++++-- comfy/sd1_clip.py | 4 ++-- comfy/sd2_clip.py | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index aea55bbdf8f..4a2823c9d24 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -388,11 +388,13 @@ class EmptyClass: if clip_config["target"].endswith("FrozenOpenCLIPEmbedder"): clip_target.clip = sd2_clip.SD2ClipModel clip_target.tokenizer = sd2_clip.SD2Tokenizer + clip = CLIP(clip_target, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model.clip_h elif clip_config["target"].endswith("FrozenCLIPEmbedder"): clip_target.clip = sd1_clip.SD1ClipModel clip_target.tokenizer = sd1_clip.SD1Tokenizer - clip = CLIP(clip_target, embedding_directory=embedding_directory) - w.cond_stage_model = clip.cond_stage_model + clip = CLIP(clip_target, embedding_directory=embedding_directory) + w.cond_stage_model = clip.cond_stage_model.clip_l load_clip_weights(w, state_dict) return (comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device), clip, vae) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 5368a45dfdc..fdaa1e6c76e 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -472,11 +472,11 @@ def untokenize(self, token_weight_pair): class SD1ClipModel(torch.nn.Module): - def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel): + def __init__(self, device="cpu", dtype=None, clip_name="l", clip_model=SDClipModel, **kwargs): super().__init__() self.clip_name = clip_name self.clip = "clip_{}".format(self.clip_name) - setattr(self, self.clip, clip_model(device=device, dtype=dtype)) + setattr(self, self.clip, clip_model(device=device, dtype=dtype, **kwargs)) def clip_layer(self, layer_idx): getattr(self, self.clip).clip_layer(layer_idx) diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index 9df868b762f..ebabf7ccd51 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -21,5 +21,5 @@ def __init__(self, embedding_directory=None): super().__init__(embedding_directory=embedding_directory, clip_name="h", tokenizer=SD2ClipHTokenizer) class SD2ClipModel(sd1_clip.SD1ClipModel): - def __init__(self, device="cpu", dtype=None): - super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel) + def __init__(self, device="cpu", dtype=None, **kwargs): + super().__init__(device=device, dtype=dtype, clip_name="h", clip_model=SD2ClipHModel, **kwargs) From aac8fc99d6a06e9e3b4c0689c1fff3d379dd0672 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 28 Oct 2023 12:24:50 -0400 Subject: [PATCH 138/420] Cleanup webp import code a bit. 
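The metadata reader was written with PNG naming even though it walks a RIFF container; this renames the locals and comments to match what the code actually parses. For reference, a small Python sketch of the same WEBP chunk walk (illustrative only; like the JS below, it ignores RIFF's padding of odd-sized chunks to even offsets):

    import struct

    def webp_chunks(data: bytes):
        # Layout: RIFF <u32 size> WEBP, then repeated <fourcc><u32 little-endian size><payload>
        if data[0:4] != b"RIFF" or data[8:12] != b"WEBP":
            raise ValueError("Not a valid WEBP file")
        offset = 12
        while offset + 8 <= len(data):
            fourcc = data[offset:offset + 4].decode("ascii", errors="replace")
            (length,) = struct.unpack_from("<I", data, offset + 4)
            yield fourcc, data[offset + 8:offset + 8 + length]  # the EXIF chunk carries the metadata
            offset += 8 + length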
--- web/scripts/pnginfo.js | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index 4dc3a032c3c..491caed79f5 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -108,29 +108,25 @@ export function getWebpMetadata(file) { return new Promise((r) => { const reader = new FileReader(); reader.onload = (event) => { - // Get the PNG data as a Uint8Array - const pngData = new Uint8Array(event.target.result); - const dataView = new DataView(pngData.buffer); + const webp = new Uint8Array(event.target.result); + const dataView = new DataView(webp.buffer); - // Check that the PNG signature is present + // Check that the WEBP signature is present if (dataView.getUint32(0) !== 0x52494646 || dataView.getUint32(8) !== 0x57454250) { console.error("Not a valid WEBP file"); r(); return; } - // Start searching for chunks after the PNG signature + // Start searching for chunks after the WEBP signature let offset = 12; let txt_chunks = {}; - // Loop through the chunks in the PNG file - while (offset < pngData.length) { - // Get the length of the chunk - const length = dataView.getUint32(offset + 4, true); - // Get the chunk type - const type = String.fromCharCode(...pngData.slice(offset, offset + 4)); - if (type === "EXIF") { - // Get the keyword - let data = parseExifData(pngData.slice(offset + 8, offset + 8 + length)); + // Loop through the chunks in the WEBP file + while (offset < webp.length) { + const chunk_length = dataView.getUint32(offset + 4, true); + const chunk_type = String.fromCharCode(...webp.slice(offset, offset + 4)); + if (chunk_type === "EXIF") { + let data = parseExifData(webp.slice(offset + 8, offset + 8 + chunk_length)); for (var key in data) { var value = data[key]; let index = value.indexOf(':'); @@ -138,7 +134,7 @@ export function getWebpMetadata(file) { } } - offset += 8 + length; + offset += 8 + chunk_length; } r(txt_chunks); From a12cc0532328b93b6d8d4d5a0ca3000d0b24b72c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 29 Oct 2023 03:55:46 -0400 Subject: [PATCH 139/420] Add --max-upload-size argument, the default is 100MB. --- comfy/cli_args.py | 2 ++ server.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index d86557646f1..e79b89c0f0d 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -36,6 +36,8 @@ def __call__(self, parser, namespace, values, option_string=None): parser.add_argument("--listen", type=str, default="127.0.0.1", metavar="IP", nargs="?", const="0.0.0.0", help="Specify the IP address to listen on (default: 127.0.0.1). If --listen is provided without an argument, it defaults to 0.0.0.0. 
(listens on all)") parser.add_argument("--port", type=int, default=8188, help="Set the listen port.") parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*", help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.") +parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.") + parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+', action='append', help="Load one or more extra_model_paths.yaml files.") parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.") parser.add_argument("--temp-directory", type=str, default=None, help="Set the ComfyUI temp directory (default is in the ComfyUI directory).") diff --git a/server.py b/server.py index 63f337a873f..11bd2a0fb44 100644 --- a/server.py +++ b/server.py @@ -82,7 +82,8 @@ def __init__(self, loop): if args.enable_cors_header: middlewares.append(create_cors_middleware(args.enable_cors_header)) - self.app = web.Application(client_max_size=104857600, middlewares=middlewares) + max_upload_size = round(args.max_upload_size * 1024 * 1024) + self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares) self.sockets = dict() self.web_root = os.path.join(os.path.dirname( os.path.realpath(__file__)), "web") From 41b07ff8d7807292b56147e12347ab96972c9406 Mon Sep 17 00:00:00 2001 From: Jedrzej Kosinski Date: Sun, 29 Oct 2023 13:30:23 -0500 Subject: [PATCH 140/420] Fix TAESD preview to only decode first latent, instead of all --- latent_preview.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/latent_preview.py b/latent_preview.py index e1553c85cac..6e758a1a9d1 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -22,7 +22,7 @@ def __init__(self, taesd): self.taesd = taesd def decode_latent_to_preview(self, x0): - x_sample = self.taesd.decoder(x0)[0].detach() + x_sample = self.taesd.decoder(x0[:1])[0].detach() # x_sample = self.taesd.unscale_latents(x_sample).div(4).add(0.5) # returns value in [-2, 2] x_sample = x_sample.sub(0.5).mul(2) From 125b03eeadd2ea3e97984e421e90e48d8dd67dbf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 30 Oct 2023 13:14:11 -0400 Subject: [PATCH 141/420] Fix some OOM issues with split attention. --- comfy/ldm/modules/attention.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 4f10bbc3529..9840cc7f5c8 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -229,7 +229,7 @@ def attention_split(q, k, v, heads, mask=None): gb = 1024 ** 3 tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size - modifier = 3 if element_size == 2 else 2.5 + modifier = 3 mem_required = tensor_size * modifier steps = 1 @@ -257,10 +257,10 @@ def attention_split(q, k, v, heads, mask=None): s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale else: s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale - first_op_done = True s2 = s1.softmax(dim=-1).to(v.dtype) del s1 + first_op_done = True r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) del s2 From c837a173fab41b7132a72ab01b256b714bd6adb2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 30 Oct 2023 15:29:45 -0400 Subject: [PATCH 142/420] Fix some memory issues in sub quad attention. 
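The previous sizing heuristic derived kv chunk sizes from a guessed memory threshold and could still overshoot on some devices. The new logic simply tries query chunk sizes from large to small and keeps the first one whose attention slice fits free memory with a safety factor, leaving keys/values unchunked when it does. A sketch of the selection, mirroring the hunk below:

    def pick_chunk_sizes(mem_free_total, batch_x_heads, bytes_per_token, k_tokens):
        # per-candidate budget: free_mem / (batch*heads * bytes * query_chunk * 4.0)
        # must cover all key tokens, i.e. the full q_chunk x k_tokens slice
        # plus headroom for intermediates
        for x in (4096, 2048, 1024, 512, 256):
            if mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0) >= k_tokens:
                return x, k_tokens       # query chunk size, kv left unchunked
        return 512, None                 # conservative fallback, as in the patch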
--- comfy/ldm/modules/attention.py | 35 +++++++++++----------------------- 1 file changed, 11 insertions(+), 24 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 9840cc7f5c8..016795a5974 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -160,32 +160,19 @@ def attention_sub_quad(query, key, value, heads, mask=None): mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True) - chunk_threshold_bytes = mem_free_torch * 0.5 #Using only this seems to work better on AMD - kv_chunk_size_min = None + kv_chunk_size = None + query_chunk_size = None + + for x in [4096, 2048, 1024, 512, 256]: + count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0) + if count >= k_tokens: + kv_chunk_size = k_tokens + query_chunk_size = x + break - #not sure at all about the math here - #TODO: tweak this - if mem_free_total > 8192 * 1024 * 1024 * 1.3: - query_chunk_size_x = 1024 * 4 - elif mem_free_total > 4096 * 1024 * 1024 * 1.3: - query_chunk_size_x = 1024 * 2 - else: - query_chunk_size_x = 1024 - kv_chunk_size_min_x = None - kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024 - if kv_chunk_size_x < 1024: - kv_chunk_size_x = None - - if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: - # the big matmul fits into our memory limit; do everything in 1 chunk, - # i.e. send it down the unchunked fast-path - query_chunk_size = q_tokens - kv_chunk_size = k_tokens - else: - query_chunk_size = query_chunk_size_x - kv_chunk_size = kv_chunk_size_x - kv_chunk_size_min = kv_chunk_size_min_x + if query_chunk_size is None: + query_chunk_size = 512 hidden_states = efficient_dot_product_attention( query, From 23c5d17837f788df77cfa80a0453d7cdddfe0fe8 Mon Sep 17 00:00:00 2001 From: tsone Date: Tue, 31 Oct 2023 20:54:33 +0100 Subject: [PATCH 143/420] Added Bayer dithering to Quantize node. 
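Ordered (Bayer) dithering adds a fixed, tiled threshold pattern to the image before palette quantization, trading Floyd-Steinberg's sequential error diffusion for a stable, batch-friendly pattern. For reference, the recursive matrix built in the hunk below has zero mean and entries in (-0.5, 0.5); for order 2 (n = 1) it evaluates to:

    import numpy as np

    def normalized_bayer_matrix(n):                    # same recursion as the patch
        if n == 0:
            return np.zeros((1, 1), "float32")
        q = 4 ** n
        m = q * normalized_bayer_matrix(n - 1)
        return np.bmat(((m - 1.5, m + 0.5), (m + 1.5, m - 0.5))) / q

    print(normalized_bayer_matrix(1))
    # [[-0.375  0.125]
    #  [ 0.375 -0.125]]
    # scaled by spread = 2 * 256 / num_colors and shifted by +0.5 per pixel

Note that bayer() is defined without self and invoked as Quantize.bayer(im, pal_im, order), so it behaves as an implicit static method.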
--- comfy_extras/nodes_post_processing.py | 46 +++++++++++++++++++++------ 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 3f651e59456..324cfe105f2 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -126,7 +126,7 @@ def INPUT_TYPES(s): "max": 256, "step": 1 }), - "dither": (["none", "floyd-steinberg"],), + "dither": (["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"],), }, } @@ -135,19 +135,47 @@ def INPUT_TYPES(s): CATEGORY = "image/postprocessing" - def quantize(self, image: torch.Tensor, colors: int = 256, dither: str = "FLOYDSTEINBERG"): + def bayer(im, pal_im, order): + def normalized_bayer_matrix(n): + if n == 0: + return np.zeros((1,1), "float32") + else: + q = 4 ** n + m = q * normalized_bayer_matrix(n - 1) + return np.bmat(((m-1.5, m+0.5), (m+1.5, m-0.5))) / q + + num_colors = len(pal_im.getpalette()) // 3 + spread = 2 * 256 / num_colors + bayer_n = int(math.log2(order)) + bayer_matrix = torch.from_numpy(spread * normalized_bayer_matrix(bayer_n) + 0.5) + + result = torch.from_numpy(np.array(im).astype(np.float32)) + tw = math.ceil(result.shape[0] / bayer_matrix.shape[0]) + th = math.ceil(result.shape[1] / bayer_matrix.shape[1]) + tiled_matrix = bayer_matrix.tile(tw, th).unsqueeze(-1) + result.add_(tiled_matrix[:result.shape[0],:result.shape[1]]).clamp_(0, 255) + result = result.to(dtype=torch.uint8) + + im = Image.fromarray(result.cpu().numpy()) + im = im.quantize(palette=pal_im, dither=Image.Dither.NONE) + return im + + def quantize(self, image: torch.Tensor, colors: int, dither: str): batch_size, height, width, _ = image.shape result = torch.zeros_like(image) - dither_option = Image.Dither.FLOYDSTEINBERG if dither == "floyd-steinberg" else Image.Dither.NONE - for b in range(batch_size): - tensor_image = image[b] - img = (tensor_image * 255).to(torch.uint8).numpy() - pil_image = Image.fromarray(img, mode='RGB') + im = Image.fromarray((image[b] * 255).to(torch.uint8).numpy(), mode='RGB') + + pal_im = im.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836 - palette = pil_image.quantize(colors=colors) # Required as described in https://github.com/python-pillow/Pillow/issues/5836 - quantized_image = pil_image.quantize(colors=colors, palette=palette, dither=dither_option) + if dither == "none": + quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.NONE) + elif dither == "floyd-steinberg": + quantized_image = im.quantize(palette=pal_im, dither=Image.Dither.FLOYDSTEINBERG) + elif dither.startswith("bayer"): + order = int(dither.split('-')[-1]) + quantized_image = Quantize.bayer(im, pal_im, order) quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255 result[b] = quantized_array From 1777b54d0217e77a6a64b0a587b9b11a48e3bf02 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 31 Oct 2023 17:33:43 -0400 Subject: [PATCH 144/420] Sampling code changes. apply_model in model_base now returns the denoised output. This means that sampling_function now computes things on the denoised output instead of the model output. This should make things more consistent across current and future models. 
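Concretely, with sigma_data fixed at 1.0 the two objectives reduce to the following (a toy restatement of the new EPS and V_PREDICTION classes in the hunks below, with broadcasting over batch shapes omitted):

    import torch

    def denoised_eps(x, sigma, model_output):
        # eps models predict noise: denoised = x - sigma * eps
        return x - model_output * sigma

    def denoised_v(x, sigma, model_output, sigma_data=1.0):
        # v-prediction models: denoised mixes the input and the v output
        return (x * sigma_data ** 2 / (sigma ** 2 + sigma_data ** 2)
                - model_output * sigma * sigma_data / (sigma ** 2 + sigma_data ** 2) ** 0.5)

    # sanity check: with zero predicted noise, an eps model returns x unchanged
    x = torch.randn(3)
    assert torch.equal(denoised_eps(x, torch.tensor(2.0), torch.zeros(3)), x)

Every model thus hands the samplers an x0 estimate, and the per-objective algebra stays out of sampling_function.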
--- comfy/extra_samplers/uni_pc.py | 8 ++- comfy/model_base.py | 119 ++++++++++++++++++++++++++------- comfy/samplers.py | 72 ++++++++++---------- 3 files changed, 135 insertions(+), 64 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 9d5f0c60bdc..1a7a8392902 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -852,6 +852,12 @@ def marginal_lambda(self, t): log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) return log_mean_coeff - log_std +def predict_eps_sigma(model, input, sigma_in, **kwargs): + sigma = sigma_in.view(sigma_in.shape[:1] + (1,) * (input.ndim - 1)) + input = input * ((sigma ** 2 + 1.0) ** 0.5) + return (input - model(input, sigma_in, **kwargs)) / sigma + + def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): timesteps = sigmas.clone() if sigmas[-1] == 0: @@ -874,7 +880,7 @@ def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, ex model_type = "noise" model_fn = model_wrapper( - model.predict_eps_sigma, + lambda input, sigma, **kwargs: predict_eps_sigma(model, input, sigma, **kwargs), ns, model_type=model_type, guidance_type="uncond", diff --git a/comfy/model_base.py b/comfy/model_base.py index ea3ea61f213..b8d04a2c84f 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -13,6 +13,90 @@ class ModelType(Enum): EPS = 1 V_PREDICTION = 2 + +#NOTE: all this sampling stuff will be moved +class EPS: + def calculate_input(self, sigma, noise): + sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input - model_output * sigma + + +class V_PREDICTION(EPS): + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + +class ModelSamplingDiscrete(torch.nn.Module): + def __init__(self, model_config): + super().__init__() + self._register_schedule(given_betas=None, beta_schedule=model_config.beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + self.sigma_data = 1.0 + + def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if given_betas is not None: + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + alphas = 1. 
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + + # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) + + sigmas = torch.tensor(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, dtype=torch.float32) + + self.register_buffer('sigmas', sigmas) + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + log_sigma = sigma.log() + dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape) + + def sigma(self, timestep): + t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) + low_idx = t.floor().long() + high_idx = t.ceil().long() + w = t.frac() + log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] + return log_sigma.exp() + +def model_sampling(model_config, model_type): + if model_type == ModelType.EPS: + c = EPS + elif model_type == ModelType.V_PREDICTION: + c = V_PREDICTION + + s = ModelSamplingDiscrete + + class ModelSampling(s, c): + pass + + return ModelSampling(model_config) + + + class BaseModel(torch.nn.Module): def __init__(self, model_config, model_type=ModelType.EPS, device=None): super().__init__() @@ -20,10 +104,12 @@ def __init__(self, model_config, model_type=ModelType.EPS, device=None): unet_config = model_config.unet_config self.latent_format = model_config.latent_format self.model_config = model_config - self.register_schedule(given_betas=None, beta_schedule=model_config.beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + if not unet_config.get("disable_unet_model_creation", False): self.diffusion_model = UNetModel(**unet_config, device=device) self.model_type = model_type + self.model_sampling = model_sampling(model_config, model_type) + self.adm_channels = unet_config.get("adm_in_channels", None) if self.adm_channels is None: self.adm_channels = 0 @@ -31,39 +117,22 @@ def __init__(self, model_config, model_type=ModelType.EPS, device=None): print("model_type", model_type.name) print("adm", self.adm_channels) - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if given_betas is not None: - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - - self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) - self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) - self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs): + sigma = t + xc = self.model_sampling.calculate_input(sigma, x) if c_concat is not None: - xc = torch.cat([x] + [c_concat], dim=1) - else: - xc = x + xc = torch.cat([xc] + [c_concat], dim=1) + context = c_crossattn dtype = self.get_dtype() xc = xc.to(dtype) - t = t.to(dtype) + t = self.model_sampling.timestep(t).to(dtype) context = context.to(dtype) extra_conds = {} for o in kwargs: extra_conds[o] = kwargs[o].to(dtype) - return self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() + model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() + return self.model_sampling.calculate_denoised(sigma, model_output, x) def get_dtype(self): return self.diffusion_model.dtype diff --git a/comfy/samplers.py b/comfy/samplers.py index f930aa39bb3..5f9c74557b1 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -13,7 +13,7 @@ #The main sampling function shared by all the samplers -#Returns predicted noise +#Returns denoised def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) @@ -257,24 +257,15 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot else: return uncond + (cond - uncond) * cond_scale - -class CompVisVDenoiser(k_diffusion_external.DiscreteVDDPMDenoiser): - def __init__(self, model, quantize=False, device='cpu'): - super().__init__(model, model.alphas_cumprod, quantize=quantize) - - def get_v(self, x, t, cond, **kwargs): - return self.inner_model.apply_model(x, t, cond, **kwargs) - - class CFGNoisePredictor(torch.nn.Module): def __init__(self, model): super().__init__() self.inner_model = model - self.alphas_cumprod = model.alphas_cumprod def apply_model(self, x, timestep, cond, uncond, cond_scale, model_options={}, seed=None): out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) return out - + def forward(self, *args, **kwargs): + return self.apply_model(*args, **kwargs) class KSamplerX0Inpaint(torch.nn.Module): def __init__(self, model): @@ -293,32 +284,40 @@ def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, model_option return out def simple_scheduler(model, steps): + s = model.model_sampling sigs = [] - ss = len(model.sigmas) / steps + ss = len(s.sigmas) / steps for x in range(steps): - sigs += [float(model.sigmas[-(1 + int(x * ss))])] + sigs += [float(s.sigmas[-(1 + int(x * ss))])] sigs += [0.0] return torch.FloatTensor(sigs) def ddim_scheduler(model, steps): + s = model.model_sampling sigs = [] - ddim_timesteps = make_ddim_timesteps(ddim_discr_method="uniform", num_ddim_timesteps=steps, 
@@ -508,7 +507,9 @@ def sample(self):
         pass
 
     def max_denoise(self, model_wrap, sigmas):
-        return math.isclose(float(model_wrap.sigma_max), float(sigmas[0]), rel_tol=1e-05)
+        max_sigma = float(model_wrap.inner_model.model_sampling.sigma_max)
+        sigma = float(sigmas[0])
+        return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma
 
 class DDIM(Sampler):
     def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False):
@@ -592,11 +593,7 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N
 
 def wrap_model(model):
     model_denoise = CFGNoisePredictor(model)
-    if model.model_type == model_base.ModelType.V_PREDICTION:
-        model_wrap = CompVisVDenoiser(model_denoise, quantize=True)
-    else:
-        model_wrap = k_diffusion_external.CompVisDenoiser(model_denoise, quantize=True)
-    return model_wrap
+    return model_denoise
 
 def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model_options={}, latent_image=None, denoise_mask=None, callback=None, disable_pbar=False, seed=None):
     positive = positive[:]
@@ -637,19 +634,18 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model
 
 SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
 
 def calculate_sigmas_scheduler(model, scheduler_name, steps):
-    model_wrap = wrap_model(model)
     if scheduler_name == "karras":
-        sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model_wrap.sigma_min), sigma_max=float(model_wrap.sigma_max))
+        sigmas = k_diffusion_sampling.get_sigmas_karras(n=steps, sigma_min=float(model.model_sampling.sigma_min), sigma_max=float(model.model_sampling.sigma_max))
     elif scheduler_name == "exponential":
-        sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=float(model_wrap.sigma_min), sigma_max=float(model_wrap.sigma_max))
+        sigmas = k_diffusion_sampling.get_sigmas_exponential(n=steps, sigma_min=float(model.model_sampling.sigma_min), sigma_max=float(model.model_sampling.sigma_max))
     elif scheduler_name == "normal":
-        sigmas = model_wrap.get_sigmas(steps)
+        sigmas = normal_scheduler(model, steps)
     elif scheduler_name == "simple":
-        sigmas = simple_scheduler(model_wrap, steps)
+        sigmas = simple_scheduler(model, steps)
     elif scheduler_name == "ddim_uniform":
-        sigmas = ddim_scheduler(model_wrap, steps)
+        sigmas = ddim_scheduler(model, steps)
     elif scheduler_name == "sgm_uniform":
-        sigmas = sgm_scheduler(model_wrap, steps)
+        sigmas = normal_scheduler(model, steps, sgm=True)
     else:
         print("error invalid scheduler", self.scheduler)
     return sigmas
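Everything in calculate_sigmas_scheduler now pulls its noise levels from the sigma table that ModelSamplingDiscrete registers in the first hunk of this patch. A self-contained sketch of that table and the sigma()/timestep() round trip, assuming the "linear" beta rule (squared linspace between the square roots of the bounds) with the 0.00085/0.012 constants taken from the removed register_schedule call; treat this as an approximation of the registered buffers, not the exact code path:

import torch

# Rebuild the sigma table the way the new schedule registration does for "linear" betas.
betas = torch.linspace(0.00085 ** 0.5, 0.012 ** 0.5, 1000) ** 2
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5  # ascending: sigmas[0] is sigma_min
log_sigmas = sigmas.log()

def sigma(t):  # fractional timestep -> sigma, interpolating in log space
    t = torch.clamp(t.float(), min=0, max=len(sigmas) - 1)
    low, high, w = t.floor().long(), t.ceil().long(), t.frac()
    return ((1 - w) * log_sigmas[low] + w * log_sigmas[high]).exp()

def timestep(sig):  # sigma -> nearest discrete timestep
    return (sig.log() - log_sigmas[:, None]).abs().argmin(dim=0).view(sig.shape)

print(float(sigmas[0]), float(sigmas[-1]))        # roughly sigma_min and sigma_max
t = torch.tensor([0.0, 500.0, 999.0])
assert torch.equal(timestep(sigma(t)), t.long())  # integer timesteps round-trip exactly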
From a268a574fab025deed91f5201910ac052132c42c Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 31 Oct 2023 18:11:29 -0400
Subject: [PATCH 145/420] Remove a bunch of useless code.

DDIM is the same as euler with a small difference in the inpaint code.
DDIM uses randn_like but I set a fixed seed instead.

I'm keeping it in because I'm sure if I remove it people are going to
complain.
---
 comfy/ldm/models/diffusion/__init__.py        |    0
 comfy/ldm/models/diffusion/ddim.py            |  418 ------
 .../models/diffusion/dpm_solver/__init__.py   |    1 -
 .../models/diffusion/dpm_solver/dpm_solver.py | 1163 -----------------
 .../models/diffusion/dpm_solver/sampler.py    |   96 --
 comfy/ldm/models/diffusion/plms.py            |  245 ----
 comfy/ldm/models/diffusion/sampling_util.py   |   22 -
 comfy/samplers.py                             |   47 +-
 8 files changed, 7 insertions(+), 1985 deletions(-)
 delete mode 100644 comfy/ldm/models/diffusion/__init__.py
 delete mode 100644 comfy/ldm/models/diffusion/ddim.py
 delete mode 100644 comfy/ldm/models/diffusion/dpm_solver/__init__.py
 delete mode 100644 comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py
 delete mode 100644 comfy/ldm/models/diffusion/dpm_solver/sampler.py
 delete mode 100644 comfy/ldm/models/diffusion/plms.py
 delete mode 100644 comfy/ldm/models/diffusion/sampling_util.py

diff --git a/comfy/ldm/models/diffusion/__init__.py b/comfy/ldm/models/diffusion/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/comfy/ldm/models/diffusion/ddim.py b/comfy/ldm/models/diffusion/ddim.py
deleted file mode 100644
index 433d48e3064..00000000000
--- a/comfy/ldm/models/diffusion/ddim.py
+++ /dev/null
@@ -1,418 +0,0 @@
-"""SAMPLING ONLY."""
-
-import torch
-import numpy as np
-from tqdm import tqdm
-
-from comfy.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
-
-
-class DDIMSampler(object):
-    def __init__(self, model, schedule="linear", device=torch.device("cuda"), **kwargs):
-        super().__init__()
-        self.model = model
-        self.ddpm_num_timesteps = model.num_timesteps
-        self.schedule = schedule
-        self.device = device
-        self.parameterization = kwargs.get("parameterization", "eps")
-
-    def register_buffer(self, name, attr):
-        if type(attr) == torch.Tensor:
-            if attr.device != self.device:
-                attr = attr.float().to(self.device)
-        setattr(self, name, attr)
-
-    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
-        ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
-                                             num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
-        self.make_schedule_timesteps(ddim_timesteps, ddim_eta=ddim_eta, verbose=verbose)
-
-    def make_schedule_timesteps(self, ddim_timesteps, ddim_eta=0., verbose=True):
-        self.ddim_timesteps = torch.tensor(ddim_timesteps)
-        alphas_cumprod = self.model.alphas_cumprod
-        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device)
-
-        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
-        # calculations for diffusion q(x_t | x_{t-1}) and others
-        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
-        self.register_buffer('sqrt_one_minus_alphas_cumprod',
to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample_custom(self, - ddim_timesteps, - conditioning=None, - callback=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - dynamic_threshold=None, - ucg_schedule=None, - denoise_function=None, - extra_args=None, - to_zero=True, - end_step=None, - disable_pbar=False, - **kwargs - ): - self.make_schedule_timesteps(ddim_timesteps=ddim_timesteps, ddim_eta=eta, verbose=verbose) - samples, intermediates = self.ddim_sampling(conditioning, x_T.shape, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ucg_schedule=ucg_schedule, - denoise_function=denoise_function, - extra_args=extra_args, - to_zero=to_zero, - end_step=end_step, - disable_pbar=disable_pbar - ) - return samples, intermediates - - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- dynamic_threshold=None, - ucg_schedule=None, - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - elif isinstance(conditioning, list): - for ctmp in conditioning: - if ctmp.shape[0] != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ucg_schedule=ucg_schedule, - denoise_function=None, - extra_args=None - ) - return samples, intermediates - - def q_sample(self, x_start, t, noise=None): - if noise is None: - noise = torch.randn_like(x_start) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, - ucg_schedule=None, denoise_function=None, extra_args=None, to_zero=True, end_step=None, disable_pbar=False): - device = self.model.alphas_cumprod.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else timesteps.flip(0) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - # print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range[:end_step], desc='DDIM Sampler', total=end_step, disable=disable_pbar) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - if ucg_schedule is not None: - assert len(ucg_schedule) == len(time_range) - unconditional_guidance_scale = ucg_schedule[i] - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, denoise_function=denoise_function, extra_args=extra_args) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - if to_zero: - img = pred_x0 - else: - if ddim_use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - img /= sqrt_alphas_cumprod[index - 1] - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, - dynamic_threshold=None, denoise_function=None, extra_args=None): - b, *_, device = *x.shape, x.device - - if denoise_function is not None: - model_output = denoise_function(x, t, **extra_args) - elif unconditional_conditioning is None or unconditional_guidance_scale == 1.: - model_output = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - if isinstance(c, dict): - assert isinstance(unconditional_conditioning, dict) - c_in = dict() - for k in c: - if isinstance(c[k], list): - c_in[k] = [torch.cat([ - unconditional_conditioning[k][i], - c[k][i]]) for i in range(len(c[k]))] - else: - c_in[k] = torch.cat([ - unconditional_conditioning[k], - c[k]]) - elif isinstance(c, list): - c_in = list() - assert isinstance(unconditional_conditioning, list) - for i in range(len(c)): - c_in.append(torch.cat([unconditional_conditioning[i], c[i]])) - else: - c_in = torch.cat([unconditional_conditioning, c]) - model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond) - - if self.parameterization == "v": - e_t = extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * model_output + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x - else: - e_t = model_output - - if score_corrector is not None: - assert self.parameterization == "eps", 'not implemented' - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 
1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - if self.parameterization != "v": - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - else: - pred_x0 = extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * x - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * model_output - - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - - if dynamic_threshold is not None: - raise NotImplementedError() - - # direction pointing to x_t - dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - @torch.no_grad() - def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, - unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None): - num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] - - assert t_enc <= num_reference_steps - num_steps = t_enc - - if use_original_steps: - alphas_next = self.alphas_cumprod[:num_steps] - alphas = self.alphas_cumprod_prev[:num_steps] - else: - alphas_next = self.ddim_alphas[:num_steps] - alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) - - x_next = x0 - intermediates = [] - inter_steps = [] - for i in tqdm(range(num_steps), desc='Encoding Image'): - t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) - if unconditional_guidance_scale == 1.: - noise_pred = self.model.apply_model(x_next, t, c) - else: - assert unconditional_conditioning is not None - e_t_uncond, noise_pred = torch.chunk( - self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), - torch.cat((unconditional_conditioning, c))), 2) - noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) - - xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next - weighted_noise_pred = alphas_next[i].sqrt() * ( - (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred - x_next = xt_weighted + weighted_noise_pred - if return_intermediates and i % ( - num_steps // return_intermediates) == 0 and i < num_steps - 1: - intermediates.append(x_next) - inter_steps.append(i) - elif return_intermediates and i >= num_steps - 2: - intermediates.append(x_next) - inter_steps.append(i) - if callback: callback(i) - - out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} - if return_intermediates: - out.update({'intermediates': intermediates}) - return x_next, out - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None, max_denoise=False): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - if max_denoise: - noise_multiplier = 1.0 - else: - noise_multiplier = extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) - - return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + noise_multiplier * noise) - - 
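The stochastic_encode method deleted just above is ordinary forward diffusion, which is part of what the commit message means by DDIM being the same as euler: in sigma terms it is exactly the noising the k-diffusion samplers already perform. A small check, assuming the standard linear schedule, that the DDPM-style q-sample equals the sigma-space form:

import torch

# q-sample in DDPM form vs the equivalent sigma-space form (sigma^2 = (1 - acp) / acp).
betas = torch.linspace(0.00085 ** 0.5, 0.012 ** 0.5, 1000) ** 2
acp = torch.cumprod(1.0 - betas, dim=0)
t = 500
x0, eps = torch.randn(1, 4, 8, 8), torch.randn(1, 4, 8, 8)

xt_ddpm = acp[t].sqrt() * x0 + (1 - acp[t]).sqrt() * eps
sigma_t = ((1 - acp[t]) / acp[t]).sqrt()
xt_sigma = (x0 + sigma_t * eps) / (1 + sigma_t ** 2).sqrt()
assert torch.allclose(xt_ddpm, xt_sigma, atol=1e-5)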
@torch.no_grad() - def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - use_original_steps=False, callback=None): - - timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) - x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - if callback: callback(i) - return x_dec \ No newline at end of file diff --git a/comfy/ldm/models/diffusion/dpm_solver/__init__.py b/comfy/ldm/models/diffusion/dpm_solver/__init__.py deleted file mode 100644 index 7427f38c075..00000000000 --- a/comfy/ldm/models/diffusion/dpm_solver/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py b/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py deleted file mode 100644 index da8d41f9c5e..00000000000 --- a/comfy/ldm/models/diffusion/dpm_solver/dpm_solver.py +++ /dev/null @@ -1,1163 +0,0 @@ -import torch -import torch.nn.functional as F -import math -from tqdm import tqdm - - -class NoiseScheduleVP: - def __init__( - self, - schedule='discrete', - betas=None, - alphas_cumprod=None, - continuous_beta_0=0.1, - continuous_beta_1=20., - ): - """Create a wrapper class for the forward SDE (VP type). - *** - Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. - We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. - *** - The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). - We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). - Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have: - log_alpha_t = self.marginal_log_mean_coeff(t) - sigma_t = self.marginal_std(t) - lambda_t = self.marginal_lambda(t) - Moreover, as lambda(t) is an invertible function, we also support its inverse function: - t = self.inverse_lambda(lambda_t) - =============================================================== - We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). - 1. For discrete-time DPMs: - For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: - t_i = (i + 1) / N - e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. - We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. - Args: - betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) - alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) - Note that we always have alphas_cumprod = cumprod(betas). 
Therefore, we only need to set one of `betas` and `alphas_cumprod`. - **Important**: Please pay special attention for the args for `alphas_cumprod`: - The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that - q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). - Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have - alpha_{t_n} = \sqrt{\hat{alpha_n}}, - and - log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). - 2. For continuous-time DPMs: - We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise - schedule are the default settings in DDPM and improved-DDPM: - Args: - beta_min: A `float` number. The smallest beta for the linear schedule. - beta_max: A `float` number. The largest beta for the linear schedule. - cosine_s: A `float` number. The hyperparameter in the cosine schedule. - cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. - T: A `float` number. The ending time of the forward process. - =============================================================== - Args: - schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, - 'linear' or 'cosine' for continuous-time DPMs. - Returns: - A wrapper object of the forward SDE (VP type). - - =============================================================== - Example: - # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', betas=betas) - # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) - # For continuous-time DPMs (VPSDE), linear schedule: - >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) - """ - - if schedule not in ['discrete', 'linear', 'cosine']: - raise ValueError( - "Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format( - schedule)) - - self.schedule = schedule - if schedule == 'discrete': - if betas is not None: - log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) - else: - assert alphas_cumprod is not None - log_alphas = 0.5 * torch.log(alphas_cumprod) - self.total_N = len(log_alphas) - self.T = 1. - self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) - self.log_alpha_array = log_alphas.reshape((1, -1,)) - else: - self.total_N = 1000 - self.beta_0 = continuous_beta_0 - self.beta_1 = continuous_beta_1 - self.cosine_s = 0.008 - self.cosine_beta_max = 999. - self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * ( - 1. + self.cosine_s) / math.pi - self.cosine_s - self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) - self.schedule = schedule - if schedule == 'cosine': - # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. - # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. - self.T = 0.9946 - else: - self.T = 1. - - def marginal_log_mean_coeff(self, t): - """ - Compute log(alpha_t) of a given continuous-time label t in [0, T]. 
- """ - if self.schedule == 'discrete': - return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), - self.log_alpha_array.to(t.device)).reshape((-1)) - elif self.schedule == 'linear': - return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 - elif self.schedule == 'cosine': - log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) - log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 - return log_alpha_t - - def marginal_alpha(self, t): - """ - Compute alpha_t of a given continuous-time label t in [0, T]. - """ - return torch.exp(self.marginal_log_mean_coeff(t)) - - def marginal_std(self, t): - """ - Compute sigma_t of a given continuous-time label t in [0, T]. - """ - return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) - - def marginal_lambda(self, t): - """ - Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. - """ - log_mean_coeff = self.marginal_log_mean_coeff(t) - log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) - return log_mean_coeff - log_std - - def inverse_lambda(self, lamb): - """ - Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. - """ - if self.schedule == 'linear': - tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - Delta = self.beta_0 ** 2 + tmp - return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) - elif self.schedule == 'discrete': - log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) - t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), - torch.flip(self.t_array.to(lamb.device), [1])) - return t.reshape((-1,)) - else: - log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * ( - 1. + self.cosine_s) / math.pi - self.cosine_s - t = t_fn(log_alpha) - return t - - -def model_wrapper( - model, - noise_schedule, - model_type="noise", - model_kwargs={}, - guidance_type="uncond", - condition=None, - unconditional_condition=None, - guidance_scale=1., - classifier_fn=None, - classifier_kwargs={}, -): - """Create a wrapper function for the noise prediction model. - DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to - firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. - We support four types of the diffusion model by setting `model_type`: - 1. "noise": noise prediction model. (Trained by predicting noise). - 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). - 3. "v": velocity prediction model. (Trained by predicting the velocity). - The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. - [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." - arXiv preprint arXiv:2202.00512 (2022). - [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." - arXiv preprint arXiv:2210.02303 (2022). - - 4. "score": marginal score function. (Trained by denoising score matching). 
- Note that the score function and the noise prediction model follows a simple relationship: - ``` - noise(x_t, t) = -sigma_t * score(x_t, t) - ``` - We support three types of guided sampling by DPMs by setting `guidance_type`: - 1. "uncond": unconditional sampling by DPMs. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - The input `classifier_fn` has the following format: - `` - classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) - `` - [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," - in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. - 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. - The input `model` has the following format: - `` - model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score - `` - And if cond == `unconditional_condition`, the model output is the unconditional DPM output. - [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." - arXiv preprint arXiv:2207.12598 (2022). - - The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) - or continuous-time labels (i.e. epsilon to T). - We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: - `` - def model_fn(x, t_continuous) -> noise: - t_input = get_model_input_time(t_continuous) - return noise_pred(model, x, t_input, **model_kwargs) - `` - where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. - =============================================================== - Args: - model: A diffusion model with the corresponding format described above. - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - model_type: A `str`. The parameterization type of the diffusion model. - "noise" or "x_start" or "v" or "score". - model_kwargs: A `dict`. A dict for the other inputs of the model function. - guidance_type: A `str`. The type of the guidance for sampling. - "uncond" or "classifier" or "classifier-free". - condition: A pytorch tensor. The condition for the guided sampling. - Only used for "classifier" or "classifier-free" guidance type. - unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. - Only used for "classifier-free" guidance type. - guidance_scale: A `float`. The scale for the guided sampling. - classifier_fn: A classifier function. Only used for the classifier guidance. - classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. - Returns: - A noise prediction model that accepts the noised data and the continuous time as the inputs. - """ - - def get_model_input_time(t_continuous): - """ - Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. - For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. - For continuous-time DPMs, we just use `t_continuous`. - """ - if noise_schedule.schedule == 'discrete': - return (t_continuous - 1. / noise_schedule.total_N) * 1000. 
- else: - return t_continuous - - def noise_pred_fn(x, t_continuous, cond=None): - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - t_input = get_model_input_time(t_continuous) - if cond is None: - output = model(x, t_input, **model_kwargs) - else: - output = model(x, t_input, cond, **model_kwargs) - if model_type == "noise": - return output - elif model_type == "x_start": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) - elif model_type == "v": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x - elif model_type == "score": - sigma_t = noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return -expand_dims(sigma_t, dims) * output - - def cond_grad_fn(x, t_input): - """ - Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). - """ - with torch.enable_grad(): - x_in = x.detach().requires_grad_(True) - log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) - return torch.autograd.grad(log_prob.sum(), x_in)[0] - - def model_fn(x, t_continuous): - """ - The noise predicition model function that is used for DPM-Solver. - """ - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - if guidance_type == "uncond": - return noise_pred_fn(x, t_continuous) - elif guidance_type == "classifier": - assert classifier_fn is not None - t_input = get_model_input_time(t_continuous) - cond_grad = cond_grad_fn(x, t_input) - sigma_t = noise_schedule.marginal_std(t_continuous) - noise = noise_pred_fn(x, t_continuous) - return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad - elif guidance_type == "classifier-free": - if guidance_scale == 1. or unconditional_condition is None: - return noise_pred_fn(x, t_continuous, cond=condition) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t_continuous] * 2) - if isinstance(condition, dict): - assert isinstance(unconditional_condition, dict) - c_in = dict() - for k in condition: - if isinstance(condition[k], list): - c_in[k] = [torch.cat([unconditional_condition[k][i], condition[k][i]]) for i in range(len(condition[k]))] - else: - c_in[k] = torch.cat([unconditional_condition[k], condition[k]]) - else: - c_in = torch.cat([unconditional_condition, condition]) - noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) - return noise_uncond + guidance_scale * (noise - noise_uncond) - - assert model_type in ["noise", "x_start", "v"] - assert guidance_type in ["uncond", "classifier", "classifier-free"] - return model_fn - - -class DPM_Solver: - def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): - """Construct a DPM-Solver. - We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). - If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). - If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). - In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. - The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. 
- Args: - model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): - `` - def model_fn(x, t_continuous): - return noise - `` - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. - thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. - max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. - - [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. - """ - self.model = model_fn - self.noise_schedule = noise_schedule - self.predict_x0 = predict_x0 - self.thresholding = thresholding - self.max_val = max_val - - def noise_prediction_fn(self, x, t): - """ - Return the noise prediction model. - """ - return self.model(x, t) - - def data_prediction_fn(self, x, t): - """ - Return the data prediction model (with thresholding). - """ - noise = self.noise_prediction_fn(x, t) - dims = x.dim() - alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) - x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) - if self.thresholding: - p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. - s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) - s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) - x0 = torch.clamp(x0, -s, s) / s - return x0 - - def model_fn(self, x, t): - """ - Convert the model to the noise prediction model or the data prediction model. - """ - if self.predict_x0: - return self.data_prediction_fn(x, t) - else: - return self.noise_prediction_fn(x, t) - - def get_time_steps(self, skip_type, t_T, t_0, N, device): - """Compute the intermediate time steps for sampling. - Args: - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - N: A `int`. The total number of the spacing of the time steps. - device: A torch device. - Returns: - A pytorch tensor of the time steps, with the shape (N + 1,). - """ - if skip_type == 'logSNR': - lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) - lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) - logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) - return self.noise_schedule.inverse_lambda(logSNR_steps) - elif skip_type == 'time_uniform': - return torch.linspace(t_T, t_0, N + 1).to(device) - elif skip_type == 'time_quadratic': - t_order = 2 - t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. 
/ t_order), N + 1).pow(t_order).to(device) - return t - else: - raise ValueError( - "Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) - - def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): - """ - Get the order of each step for sampling by the singlestep DPM-Solver. - We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". - Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: - - If order == 1: - We take `steps` of DPM-Solver-1 (i.e. DDIM). - - If order == 2: - - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. - - If steps % 2 == 0, we use K steps of DPM-Solver-2. - - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If order == 3: - - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. - - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. - ============================================ - Args: - order: A `int`. The max order for the solver (2 or 3). - steps: A `int`. The total number of function evaluations (NFE). - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - device: A torch device. - Returns: - orders: A list of the solver order of each step. - """ - if order == 3: - K = steps // 3 + 1 - if steps % 3 == 0: - orders = [3, ] * (K - 2) + [2, 1] - elif steps % 3 == 1: - orders = [3, ] * (K - 1) + [1] - else: - orders = [3, ] * (K - 1) + [2] - elif order == 2: - if steps % 2 == 0: - K = steps // 2 - orders = [2, ] * K - else: - K = steps // 2 + 1 - orders = [2, ] * (K - 1) + [1] - elif order == 1: - K = 1 - orders = [1, ] * steps - else: - raise ValueError("'order' must be '1' or '2' or '3'.") - if skip_type == 'logSNR': - # To reproduce the results in DPM-Solver paper - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) - else: - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[ - torch.cumsum(torch.tensor([0, ] + orders)).to(device)] - return timesteps_outer, orders - - def denoise_to_zero_fn(self, x, s): - """ - Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. - """ - return self.data_prediction_fn(x, s) - - def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): - """ - DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. 
- return_intermediate: A `bool`. If true, also return the model value at time `s`. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - if self.predict_x0: - phi_1 = torch.expm1(-h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - else: - phi_1 = torch.expm1(h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - - def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, - solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-2 from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the second-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 0.5 - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - s1 = ns.inverse_lambda(lambda_s1) - log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff( - s1), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) - alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_1 = torch.expm1(-h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) 
/ h + 1.), dims) * ( - model_s1 - model_s) - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_1 = torch.expm1(h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) - ) - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1} - else: - return x_t - - def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None, - return_intermediate=False, solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-3 from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). - If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 1. / 3. - if r2 is None: - r2 = 2. / 3. - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - lambda_s2 = lambda_s + r2 * h - s1 = ns.inverse_lambda(lambda_s1) - s2 = ns.inverse_lambda(lambda_s2) - log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff( - s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std( - s2), ns.marginal_std(t) - alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_12 = torch.expm1(-r2 * h) - phi_1 = torch.expm1(-h) - phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. - phi_2 = phi_1 / h + 1. 
- phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(sigma_s2 / sigma_s, dims) * x - - expand_dims(alpha_s2 * phi_12, dims) * model_s - + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. * (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + expand_dims(alpha_t * phi_2, dims) * D1 - - expand_dims(alpha_t * phi_3, dims) * D2 - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_12 = torch.expm1(r2 * h) - phi_1 = torch.expm1(h) - phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. - phi_2 = phi_1 / h - 1. - phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x - - expand_dims(sigma_s2 * phi_12, dims) * model_s - - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. * (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - expand_dims(sigma_t * phi_2, dims) * D1 - - expand_dims(sigma_t * phi_3, dims) * D2 - ) - - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} - else: - return x_t - - def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): - """ - Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. 
- """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - ns = self.noise_schedule - dims = x.dim() - model_prev_1, model_prev_0 = model_prev_list - t_prev_1, t_prev_0 = t_prev_list - lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda( - t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0 = h_0 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - if self.predict_x0: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 - ) - else: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 - ) - return x_t - - def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): - """ - Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - model_prev_2, model_prev_1, model_prev_0 = model_prev_list - t_prev_2, t_prev_1, t_prev_0 = t_prev_list - lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda( - t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_1 = lambda_prev_1 - lambda_prev_2 - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0, r1 = h_0 / h, h_1 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) - D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) - D2 = expand_dims(1. 
/ (r0 + r1), dims) * (D1_0 - D1_1) - if self.predict_x0: - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 - - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h ** 2 - 0.5), dims) * D2 - ) - else: - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 - - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h ** 2 - 0.5), dims) * D2 - ) - return x_t - - def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, - r2=None): - """ - Singlestep DPM-Solver with the order `order` from time `s` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - r1: A `float`. The hyperparameter of the second-order or third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if order == 1: - return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) - elif order == 2: - return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, - solver_type=solver_type, r1=r1) - elif order == 3: - return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, - solver_type=solver_type, r1=r1, r2=r2) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): - """ - Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. 
- """ - if order == 1: - return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) - elif order == 2: - return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - elif order == 3: - return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, - solver_type='dpm_solver'): - """ - The adaptive step size solver based on singlestep DPM-Solver. - Args: - x: A pytorch tensor. The initial value at time `t_T`. - order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - h_init: A `float`. The initial step size (for logSNR). - atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. - rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. - theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. - t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the - current time and `t_0` is less than `t_err`. The default setting is 1e-5. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_0: A pytorch tensor. The approximated solution at time `t_0`. - [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. - """ - ns = self.noise_schedule - s = t_T * torch.ones((x.shape[0],)).to(x) - lambda_s = ns.marginal_lambda(s) - lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) - h = h_init * torch.ones_like(s).to(x) - x_prev = x - nfe = 0 - if order == 2: - r1 = 0.5 - lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, - solver_type=solver_type, - **kwargs) - elif order == 3: - r1, r2 = 1. / 3., 2. / 3. - lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, - return_intermediate=True, - solver_type=solver_type) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, - solver_type=solver_type, - **kwargs) - else: - raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) - while torch.abs((s - t_0)).mean() > t_err: - t = ns.inverse_lambda(lambda_s + h) - x_lower, lower_noise_kwargs = lower_update(x, s, t) - x_higher = higher_update(x, s, t, **lower_noise_kwargs) - delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) - norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) - E = norm_fn((x_higher - x_lower) / delta).max() - if torch.all(E <= 1.): - x = x_higher - s = t - x_prev = x_lower - lambda_s = ns.marginal_lambda(s) - h = torch.min(theta * h * torch.float_power(E, -1. 
/ order).float(), lambda_0 - lambda_s) - nfe += order - print('adaptive solver nfe', nfe) - return x - - def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', - method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', - atol=0.0078, rtol=0.05, - ): - """ - Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. - ===================================================== - We support the following algorithms for both noise prediction model and data prediction model: - - 'singlestep': - Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. - We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). - The total number of function evaluations (NFE) == `steps`. - Given a fixed NFE == `steps`, the sampling procedure is: - - If `order` == 1: - - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). - - If `order` == 2: - - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. - - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. - - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - - If `order` == 3: - - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. - - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. - - 'multistep': - Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. - We initialize the first `order` values by lower order multistep solvers. - Given a fixed NFE == `steps`, the sampling procedure is: - Denote K = steps. - - If `order` == 1: - - We use K steps of DPM-Solver-1 (i.e. DDIM). - - If `order` == 2: - - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. - - If `order` == 3: - - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. - - 'singlestep_fixed': - Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). - We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. - - 'adaptive': - Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). - We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. - You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs - (NFE) and the sample quality. - - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. - - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. - ===================================================== - Some advices for choosing the algorithm: - - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: - Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. - e.g. 
- >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) - >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, - skip_type='time_uniform', method='singlestep') - - For **guided sampling with large guidance scale** by DPMs: - Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. - e.g. - >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) - >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, - skip_type='time_uniform', method='multistep') - We support three types of `skip_type`: - - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images** - - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**. - - 'time_quadratic': quadratic time for the time steps. - ===================================================== - Args: - x: A pytorch tensor. The initial value at time `t_start` - e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. - steps: A `int`. The total number of function evaluations (NFE). - t_start: A `float`. The starting time of the sampling. - If `T` is None, we use self.noise_schedule.T (default is 1.0). - t_end: A `float`. The ending time of the sampling. - If `t_end` is None, we use 1. / self.noise_schedule.total_N. - e.g. if total_N == 1000, we have `t_end` == 1e-3. - For discrete-time DPMs: - - We recommend `t_end` == 1. / self.noise_schedule.total_N. - For continuous-time DPMs: - - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. - order: A `int`. The order of DPM-Solver. - skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. - method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. - denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. - Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). - This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and - score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID - for diffusion models sampling by diffusion SDEs for low-resolutional images - (such as CIFAR-10). However, we observed that such trick does not matter for - high-resolutional images. As it needs an additional NFE, we do not recommend - it for high-resolutional images. - lower_order_final: A `bool`. Whether to use lower order solvers at the final steps. - Only valid for `method=multistep` and `steps < 15`. We empirically find that - this trick is a key to stabilizing the sampling by DPM-Solver with very few steps - (especially for steps <= 10). So we recommend to set it to be `True`. - solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. - atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. - rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. - Returns: - x_end: A pytorch tensor. The approximated solution at time `t_end`. - """ - t_0 = 1. 
/ self.noise_schedule.total_N if t_end is None else t_end - t_T = self.noise_schedule.T if t_start is None else t_start - device = x.device - if method == 'adaptive': - with torch.no_grad(): - x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, - solver_type=solver_type) - elif method == 'multistep': - assert steps >= order - timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) - assert timesteps.shape[0] - 1 == steps - with torch.no_grad(): - vec_t = timesteps[0].expand((x.shape[0])) - model_prev_list = [self.model_fn(x, vec_t)] - t_prev_list = [vec_t] - # Init the first `order` values by lower order multistep DPM-Solver. - for init_order in tqdm(range(1, order), desc="DPM init order"): - vec_t = timesteps[init_order].expand(x.shape[0]) - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, - solver_type=solver_type) - model_prev_list.append(self.model_fn(x, vec_t)) - t_prev_list.append(vec_t) - # Compute the remaining values by `order`-th order multistep DPM-Solver. - for step in tqdm(range(order, steps + 1), desc="DPM multistep"): - vec_t = timesteps[step].expand(x.shape[0]) - if lower_order_final and steps < 15: - step_order = min(order, steps + 1 - step) - else: - step_order = order - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, - solver_type=solver_type) - for i in range(order - 1): - t_prev_list[i] = t_prev_list[i + 1] - model_prev_list[i] = model_prev_list[i + 1] - t_prev_list[-1] = vec_t - # We do not need to evaluate the final model value. - if step < steps: - model_prev_list[-1] = self.model_fn(x, vec_t) - elif method in ['singlestep', 'singlestep_fixed']: - if method == 'singlestep': - timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, - skip_type=skip_type, - t_T=t_T, t_0=t_0, - device=device) - elif method == 'singlestep_fixed': - K = steps // order - orders = [order, ] * K - timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) - for i, order in enumerate(orders): - t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] - timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), - N=order, device=device) - lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) - vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) - h = lambda_inner[-1] - lambda_inner[0] - r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h - r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h - x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) - if denoise_to_zero: - x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) - return x - - -############################################################# -# other utility functions -############################################################# - -def interpolate_fn(x, xp, yp): - """ - A piecewise linear function y = f(x), using xp and yp as keypoints. - We implement f(x) in a differentiable way (i.e. applicable for autograd). - The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) - Args: - x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). 
- xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. - yp: PyTorch tensor with shape [C, K]. - Returns: - The function values f(x), with shape [N, C]. - """ - N, K = x.shape[0], xp.shape[1] - all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) - sorted_all_x, x_indices = torch.sort(all_x, dim=2) - x_idx = torch.argmin(x_indices, dim=2) - cand_start_idx = x_idx - 1 - start_idx = torch.where( - torch.eq(x_idx, 0), - torch.tensor(1, device=x.device), - torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, - ), - ) - end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) - start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) - end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) - start_idx2 = torch.where( - torch.eq(x_idx, 0), - torch.tensor(0, device=x.device), - torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, - ), - ) - y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) - start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) - end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) - cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) - return cand - - -def expand_dims(v, dims): - """ - Expand the tensor `v` to the dim `dims`. - Args: - `v`: a PyTorch tensor with shape [N]. - `dim`: a `int`. - Returns: - a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. - """ - return v[(...,) + (None,) * (dims - 1)] \ No newline at end of file diff --git a/comfy/ldm/models/diffusion/dpm_solver/sampler.py b/comfy/ldm/models/diffusion/dpm_solver/sampler.py deleted file mode 100644 index e4d0d0a3875..00000000000 --- a/comfy/ldm/models/diffusion/dpm_solver/sampler.py +++ /dev/null @@ -1,96 +0,0 @@ -"""SAMPLING ONLY.""" -import torch - -from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver - -MODEL_TYPES = { - "eps": "noise", - "v": "v" -} - - -class DPMSolverSampler(object): - def __init__(self, model, device=torch.device("cuda"), **kwargs): - super().__init__() - self.model = model - self.device = device - to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) - self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != self.device: - attr = attr.to(self.device) - setattr(self, name, attr) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): ctmp = ctmp[0] - if isinstance(ctmp, torch.Tensor): - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - elif isinstance(conditioning, list): - for ctmp in conditioning: - if ctmp.shape[0] != batch_size: - print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}") - else: - if isinstance(conditioning, torch.Tensor): - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - - print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') - - device = self.model.betas.device - if x_T is None: - img = torch.randn(size, device=device) - else: - img = x_T - - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) - - model_fn = model_wrapper( - lambda x, t, c: self.model.apply_model(x, t, c), - ns, - model_type=MODEL_TYPES[self.model.parameterization], - guidance_type="classifier-free", - condition=conditioning, - unconditional_condition=unconditional_conditioning, - guidance_scale=unconditional_guidance_scale, - ) - - dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) - x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, - lower_order_final=True) - - return x.to(device), None diff --git a/comfy/ldm/models/diffusion/plms.py b/comfy/ldm/models/diffusion/plms.py deleted file mode 100644 index 9d31b3994ed..00000000000 --- a/comfy/ldm/models/diffusion/plms.py +++ /dev/null @@ -1,245 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like -from ldm.models.diffusion.sampling_util import norm_thresholding - - -class PLMSSampler(object): - def __init__(self, model, schedule="linear", device=torch.device("cuda"), **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - self.device = device - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != self.device: - attr = attr.to(self.device) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - if ddim_eta != 0: - raise ValueError('ddim_eta must be 0 for PLMS') - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - dynamic_threshold=None, - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') - - samples, intermediates = self.plms_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - dynamic_threshold=dynamic_threshold, - ) - return samples, intermediates - - @torch.no_grad() - def plms_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, - dynamic_threshold=None): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = 
self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running PLMS Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) - old_eps = [] - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. - mask) * img - - outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - old_eps=old_eps, t_next=ts_next, - dynamic_threshold=dynamic_threshold) - img, pred_x0, e_t = outs - old_eps.append(e_t) - if len(old_eps) >= 4: - old_eps.pop(0) - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None, - dynamic_threshold=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = 
self.model.first_stage_model.quantize(pred_x0) - if dynamic_threshold is not None: - pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) - # direction pointing to x_t - dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t diff --git a/comfy/ldm/models/diffusion/sampling_util.py b/comfy/ldm/models/diffusion/sampling_util.py deleted file mode 100644 index 7eff02be6d7..00000000000 --- a/comfy/ldm/models/diffusion/sampling_util.py +++ /dev/null @@ -1,22 +0,0 @@ -import torch -import numpy as np - - -def append_dims(x, target_dims): - """Appends dimensions to the end of a tensor until it has target_dims dimensions. - From https://github.com/crowsonkb/k-diffusion/blob/master/k_diffusion/utils.py""" - dims_to_append = target_dims - x.ndim - if dims_to_append < 0: - raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less') - return x[(...,) + (None,) * dims_to_append] - - -def norm_thresholding(x0, value): - s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim) - return x0 * (value / s) - - -def spatial_norm_thresholding(x0, value): - # b c h w - s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value) - return x0 * (value / s) \ No newline at end of file diff --git a/comfy/samplers.py b/comfy/samplers.py index 5f9c74557b1..e10e02c41b9 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -4,8 +4,6 @@ import torch import enum from comfy import model_management -from .ldm.models.diffusion.ddim import DDIMSampler -from .ldm.modules.diffusionmodules.util import make_ddim_timesteps import math from comfy import model_base import comfy.utils @@ -511,41 +509,6 @@ def max_denoise(self, model_wrap, sigmas): sigma = float(sigmas[0]) return math.isclose(max_sigma, sigma, rel_tol=1e-05) or sigma > max_sigma -class DDIM(Sampler): - def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - timesteps = [] - for s in range(sigmas.shape[0]): - timesteps.insert(0, model_wrap.sigma_to_discrete_timestep(sigmas[s])) - noise_mask = None - if denoise_mask is not None: - noise_mask = 1.0 - denoise_mask - - ddim_callback = None - if callback is not None: - total_steps = len(timesteps) - 1 - ddim_callback = lambda pred_x0, i: callback(i, pred_x0, None, total_steps) - - max_denoise = self.max_denoise(model_wrap, sigmas) - - ddim_sampler = DDIMSampler(model_wrap.inner_model.inner_model, device=noise.device) - ddim_sampler.make_schedule_timesteps(ddim_timesteps=timesteps, verbose=False) - z_enc = 
ddim_sampler.stochastic_encode(latent_image, torch.tensor([len(timesteps) - 1] * noise.shape[0]).to(noise.device), noise=noise, max_denoise=max_denoise) - samples, _ = ddim_sampler.sample_custom(ddim_timesteps=timesteps, - batch_size=noise.shape[0], - shape=noise.shape[1:], - verbose=False, - eta=0.0, - x_T=z_enc, - x0=latent_image, - img_callback=ddim_callback, - denoise_function=model_wrap.predict_eps_discrete_timestep, - extra_args=extra_args, - mask=noise_mask, - to_zero=sigmas[-1]==0, - end_step=sigmas.shape[0] - 1, - disable_pbar=disable_pbar) - return samples - class UNIPC(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) @@ -558,13 +521,17 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm"] -def ksampler(sampler_name, extra_options={}): +def ksampler(sampler_name, extra_options={}, inpaint_options={}): class KSAMPLER(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): extra_args["denoise_mask"] = denoise_mask model_k = KSamplerX0Inpaint(model_wrap) model_k.latent_image = latent_image - model_k.noise = noise + if inpaint_options.get("random", False): #TODO: Should this be the default? + generator = torch.manual_seed(extra_args.get("seed", 41) + 1) + model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device) + else: + model_k.noise = noise if self.max_denoise(model_wrap, sigmas): noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) @@ -656,7 +623,7 @@ def sampler_class(name): elif name == "uni_pc_bh2": sampler = UNIPCBH2 elif name == "ddim": - sampler = DDIM + sampler = ksampler("euler", inpaint_options={"random": True}) else: sampler = ksampler(name) return sampler From 7c0f255de16b78e54e0c051e9f7e1e46c7422c6c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 31 Oct 2023 22:14:32 -0400 Subject: [PATCH 146/420] Clean up percent start/end and make controlnets work with sigmas. 
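A minimal sketch of the new convention (the percent_to_sigma helper and the
model_sampling attribute come from this diff; the wrapper function and its
arguments are illustrative only): start/end percents are now mapped straight
to sigmas through the model's own sampling object instead of round-tripping
through sigma_to_t/t_to_sigma.

    def cond_sigma_range(model, start_percent, end_percent):
        # percent_to_sigma lives on the model_sampling object added below
        s = model.model_sampling
        return s.percent_to_sigma(start_percent), s.percent_to_sigma(end_percent)

ControlNets follow the same idea: pre_run() now stashes model.model_sampling
so that get_control() can recover the discrete timestep (via
model_sampling_current.timestep(t)) and the scaled input (via
calculate_input) from the sigma it is handed.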
--- comfy/controlnet.py | 14 +++++++++++++- comfy/model_base.py | 5 ++++- comfy/samplers.py | 16 +++++++++------- 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 2a88dd01924..09868158287 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -132,6 +132,7 @@ def __init__(self, control_model, global_average_pooling=False, device=None): self.control_model = control_model self.control_model_wrapped = comfy.model_patcher.ModelPatcher(self.control_model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) self.global_average_pooling = global_average_pooling + self.model_sampling_current = None def get_control(self, x_noisy, t, cond, batched_number): control_prev = None @@ -159,7 +160,10 @@ def get_control(self, x_noisy, t, cond, batched_number): y = cond.get('y', None) if y is not None: y = y.to(self.control_model.dtype) - control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=t, context=context.to(self.control_model.dtype), y=y) + timestep = self.model_sampling_current.timestep(t) + x_noisy = self.model_sampling_current.calculate_input(t, x_noisy) + + control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(self.control_model.dtype), y=y) return self.control_merge(None, control, control_prev, output_dtype) def copy(self): @@ -172,6 +176,14 @@ def get_models(self): out.append(self.control_model_wrapped) return out + def pre_run(self, model, percent_to_timestep_function): + super().pre_run(model, percent_to_timestep_function) + self.model_sampling_current = model.model_sampling + + def cleanup(self): + self.model_sampling_current = None + super().cleanup() + class ControlLoraOps: class Linear(torch.nn.Module): def __init__(self, in_features: int, out_features: int, bias: bool = True, diff --git a/comfy/model_base.py b/comfy/model_base.py index b8d04a2c84f..84cf9829d92 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -82,6 +82,9 @@ def sigma(self, timestep): log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] return log_sigma.exp() + def percent_to_sigma(self, percent): + return self.sigma(torch.tensor(percent * 999.0)) + def model_sampling(model_config, model_type): if model_type == ModelType.EPS: c = EPS @@ -126,7 +129,7 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, trans context = c_crossattn dtype = self.get_dtype() xc = xc.to(dtype) - t = self.model_sampling.timestep(t).to(dtype) + t = self.model_sampling.timestep(t).float() context = context.to(dtype) extra_conds = {} for o in kwargs: diff --git a/comfy/samplers.py b/comfy/samplers.py index e10e02c41b9..a74c8a1b832 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -415,15 +415,16 @@ def create_cond_with_same_area_if_none(conds, c): conds += [out] def calculate_start_end_timesteps(model, conds): + s = model.model_sampling for t in range(len(conds)): x = conds[t] timestep_start = None timestep_end = None if 'start_percent' in x: - timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['start_percent'] * 999.0))) + timestep_start = s.percent_to_sigma(x['start_percent']) if 'end_percent' in x: - timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x['end_percent'] * 999.0))) + timestep_end = s.percent_to_sigma(x['end_percent']) if (timestep_start is not None) or (timestep_end is not None): n 
= x.copy() @@ -434,14 +435,15 @@ def calculate_start_end_timesteps(model, conds): conds[t] = n def pre_run_control(model, conds): + s = model.model_sampling for t in range(len(conds)): x = conds[t] timestep_start = None timestep_end = None - percent_to_timestep_function = lambda a: model.sigma_to_t(model.t_to_sigma(torch.tensor(a) * 999.0)) + percent_to_timestep_function = lambda a: s.percent_to_sigma(a) if 'control' in x: - x['control'].pre_run(model.inner_model.inner_model, percent_to_timestep_function) + x['control'].pre_run(model, percent_to_timestep_function) def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): cond_cnets = [] @@ -571,8 +573,8 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model model_wrap = wrap_model(model) - calculate_start_end_timesteps(model_wrap, negative) - calculate_start_end_timesteps(model_wrap, positive) + calculate_start_end_timesteps(model, negative) + calculate_start_end_timesteps(model, positive) #make sure each cond area has an opposite one with the same area for c in positive: @@ -580,7 +582,7 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model for c in negative: create_cond_with_same_area_if_none(positive, c) - pre_run_control(model_wrap, negative + positive) + pre_run_control(model, negative + positive) apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) From 111f1b525526a850cf222d2bccec0cdb3e2c988b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 31 Oct 2023 23:19:02 -0400 Subject: [PATCH 147/420] Fix some issues with sampling precision. --- comfy/model_base.py | 4 ++-- comfy/samplers.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 84cf9829d92..37a52debf21 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -44,7 +44,7 @@ def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32) # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape @@ -56,7 +56,7 @@ def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - sigmas = torch.tensor(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, dtype=torch.float32) + sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 self.register_buffer('sigmas', sigmas) self.register_buffer('log_sigmas', sigmas.log()) diff --git a/comfy/samplers.py b/comfy/samplers.py index a74c8a1b832..518b666db2f 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -137,10 +137,10 @@ def cond_cat(c_list): def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) - out_count = torch.ones_like(x_in)/100000.0 + out_count = torch.zeros_like(x_in) out_uncond = torch.zeros_like(x_in) - out_uncond_count = torch.ones_like(x_in)/100000.0 + out_uncond_count = torch.zeros_like(x_in) COND = 0 UNCOND = 1 @@ -241,6 +241,8 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot out_uncond /= out_uncond_count del out_uncond_count + torch.nan_to_num(out_cond, nan=0.0, posinf=0.0, neginf=0.0, out=out_cond) #in case out_count or out_uncond_count had some zeros + torch.nan_to_num(out_uncond, nan=0.0, posinf=0.0, neginf=0.0, out=out_uncond) return out_cond, out_uncond From e73ec8c4dad72650e94a5c9fdad574b2d2dae66f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 00:01:30 -0400 Subject: [PATCH 148/420] Not used anymore. --- comfy/k_diffusion/external.py | 194 ---------------------------------- comfy/samplers.py | 1 - 2 files changed, 195 deletions(-) delete mode 100644 comfy/k_diffusion/external.py diff --git a/comfy/k_diffusion/external.py b/comfy/k_diffusion/external.py deleted file mode 100644 index 953d3db2c9f..00000000000 --- a/comfy/k_diffusion/external.py +++ /dev/null @@ -1,194 +0,0 @@ -import math - -import torch -from torch import nn - -from . import sampling, utils - - -class VDenoiser(nn.Module): - """A v-diffusion-pytorch model wrapper for k-diffusion.""" - - def __init__(self, inner_model): - super().__init__() - self.inner_model = inner_model - self.sigma_data = 1. 
- - def get_scalings(self, sigma): - c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - return c_skip, c_out, c_in - - def sigma_to_t(self, sigma): - return sigma.atan() / math.pi * 2 - - def t_to_sigma(self, t): - return (t * math.pi / 2).tan() - - def loss(self, input, noise, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - model_output = self.inner_model(noised_input * c_in, self.sigma_to_t(sigma), **kwargs) - target = (input - c_skip * noised_input) / c_out - return (model_output - target).pow(2).flatten(1).mean(1) - - def forward(self, input, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - return self.inner_model(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip - - -class DiscreteSchedule(nn.Module): - """A mapping between continuous noise levels (sigmas) and a list of discrete noise - levels.""" - - def __init__(self, sigmas, quantize): - super().__init__() - self.register_buffer('sigmas', sigmas) - self.register_buffer('log_sigmas', sigmas.log()) - self.quantize = quantize - - @property - def sigma_min(self): - return self.sigmas[0] - - @property - def sigma_max(self): - return self.sigmas[-1] - - def get_sigmas(self, n=None): - if n is None: - return sampling.append_zero(self.sigmas.flip(0)) - t_max = len(self.sigmas) - 1 - t = torch.linspace(t_max, 0, n, device=self.sigmas.device) - return sampling.append_zero(self.t_to_sigma(t)) - - def sigma_to_discrete_timestep(self, sigma): - log_sigma = sigma.log() - dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) - - def sigma_to_t(self, sigma, quantize=None): - quantize = self.quantize if quantize is None else quantize - if quantize: - return self.sigma_to_discrete_timestep(sigma) - log_sigma = sigma.log() - dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) - high_idx = low_idx + 1 - low, high = self.log_sigmas[low_idx], self.log_sigmas[high_idx] - w = (low - log_sigma) / (low - high) - w = w.clamp(0, 1) - t = (1 - w) * low_idx + w * high_idx - return t.view(sigma.shape) - - def t_to_sigma(self, t): - t = t.float() - low_idx = t.floor().long() - high_idx = t.ceil().long() - w = t-low_idx if t.device.type == 'mps' else t.frac() - log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() - - def predict_eps_discrete_timestep(self, input, t, **kwargs): - if t.dtype != torch.int64 and t.dtype != torch.int32: - t = t.round() - sigma = self.t_to_sigma(t) - input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) - return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) - - def predict_eps_sigma(self, input, sigma, **kwargs): - input = input * ((utils.append_dims(sigma, input.ndim) ** 2 + 1.0) ** 0.5) - return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim) - -class DiscreteEpsDDPMDenoiser(DiscreteSchedule): - """A wrapper for discrete schedule DDPM models that output eps (the predicted - noise).""" - - def __init__(self, model, alphas_cumprod, quantize): - 
super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize) - self.inner_model = model - self.sigma_data = 1. - - def get_scalings(self, sigma): - c_out = -sigma - c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - return c_out, c_in - - def get_eps(self, *args, **kwargs): - return self.inner_model(*args, **kwargs) - - def loss(self, input, noise, sigma, **kwargs): - c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - eps = self.get_eps(noised_input * c_in, self.sigma_to_t(sigma), **kwargs) - return (eps - noise).pow(2).flatten(1).mean(1) - - def forward(self, input, sigma, **kwargs): - c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs) - return input + eps * c_out - - -class OpenAIDenoiser(DiscreteEpsDDPMDenoiser): - """A wrapper for OpenAI diffusion models.""" - - def __init__(self, model, diffusion, quantize=False, has_learned_sigmas=True, device='cpu'): - alphas_cumprod = torch.tensor(diffusion.alphas_cumprod, device=device, dtype=torch.float32) - super().__init__(model, alphas_cumprod, quantize=quantize) - self.has_learned_sigmas = has_learned_sigmas - - def get_eps(self, *args, **kwargs): - model_output = self.inner_model(*args, **kwargs) - if self.has_learned_sigmas: - return model_output.chunk(2, dim=1)[0] - return model_output - - -class CompVisDenoiser(DiscreteEpsDDPMDenoiser): - """A wrapper for CompVis diffusion models.""" - - def __init__(self, model, quantize=False, device='cpu'): - super().__init__(model, model.alphas_cumprod, quantize=quantize) - - def get_eps(self, *args, **kwargs): - return self.inner_model.apply_model(*args, **kwargs) - - -class DiscreteVDDPMDenoiser(DiscreteSchedule): - """A wrapper for discrete schedule DDPM models that output v.""" - - def __init__(self, model, alphas_cumprod, quantize): - super().__init__(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5, quantize) - self.inner_model = model - self.sigma_data = 1. 
- - def get_scalings(self, sigma): - c_skip = self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - c_out = -sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - c_in = 1 / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - return c_skip, c_out, c_in - - def get_v(self, *args, **kwargs): - return self.inner_model(*args, **kwargs) - - def loss(self, input, noise, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - noised_input = input + noise * utils.append_dims(sigma, input.ndim) - model_output = self.get_v(noised_input * c_in, self.sigma_to_t(sigma), **kwargs) - target = (input - c_skip * noised_input) / c_out - return (model_output - target).pow(2).flatten(1).mean(1) - - def forward(self, input, sigma, **kwargs): - c_skip, c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)] - return self.get_v(input * c_in, self.sigma_to_t(sigma), **kwargs) * c_out + input * c_skip - - -class CompVisVDenoiser(DiscreteVDDPMDenoiser): - """A wrapper for CompVis diffusion models that output v.""" - - def __init__(self, model, quantize=False, device='cpu'): - super().__init__(model, model.alphas_cumprod, quantize=quantize) - - def get_v(self, x, t, cond, **kwargs): - return self.inner_model.apply_model(x, t, cond) diff --git a/comfy/samplers.py b/comfy/samplers.py index 518b666db2f..92ba5f8ecee 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -1,5 +1,4 @@ from .k_diffusion import sampling as k_diffusion_sampling -from .k_diffusion import external as k_diffusion_external from .extra_samplers import uni_pc import torch import enum From 88410ace9bd249e4647f0332f8f2bb46ea0aa540 Mon Sep 17 00:00:00 2001 From: Joseph Antolick Date: Wed, 1 Nov 2023 16:52:51 -0400 Subject: [PATCH 149/420] fix: handle null case for currentNode widgets to prevent scroll error --- web/extensions/core/contextMenuFilter.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/contextMenuFilter.js b/web/extensions/core/contextMenuFilter.js index 152cd7043de..0a305391a4e 100644 --- a/web/extensions/core/contextMenuFilter.js +++ b/web/extensions/core/contextMenuFilter.js @@ -25,7 +25,7 @@ const ext = { requestAnimationFrame(() => { const currentNode = LGraphCanvas.active_canvas.current_node; const clickedComboValue = currentNode.widgets - .filter(w => w.type === "combo" && w.options.values.length === values.length) + ?.filter(w => w.type === "combo" && w.options.values.length === values.length) .find(w => w.options.values.every((v, i) => v === values[i])) ?.value; From ecb80abb58d53b2e88c03272645d3c059f86b931 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 19:13:03 -0400 Subject: [PATCH 150/420] Allow ModelSamplingDiscrete to be instantiated without a model config. 
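With the new default argument the schedule can be built standalone, e.g. to
inspect sigmas without loading a full model config. A short usage sketch,
assuming only what this diff shows (the import path matches the file being
patched; the variable names are illustrative):

    from comfy.model_base import ModelSamplingDiscrete

    ms = ModelSamplingDiscrete()  # no model config: falls back to the "linear" beta schedule
    print(ms.sigmas[0].item(), ms.sigmas[-1].item())  # smallest / largest sigma of the schedule
    print(ms.percent_to_sigma(0.5))  # helper added earlier in this series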
--- comfy/model_base.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 37a52debf21..41d464e523c 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -32,9 +32,12 @@ def calculate_denoised(self, sigma, model_output, model_input): class ModelSamplingDiscrete(torch.nn.Module): - def __init__(self, model_config): + def __init__(self, model_config=None): super().__init__() - self._register_schedule(given_betas=None, beta_schedule=model_config.beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + beta_schedule = "linear" + if model_config is not None: + beta_schedule = model_config.beta_schedule + self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) self.sigma_data = 1.0 def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, From 2455aaed8a50e7a9f89f70ce0eb84fe3f34fc971 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 20:27:20 -0400 Subject: [PATCH 151/420] Allow model or clip to be None in load_lora_for_models. --- comfy/sd.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 4a2823c9d24..65a61343be1 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -55,13 +55,26 @@ def load_clip_weights(model, sd): def load_lora_for_models(model, clip, lora, strength_model, strength_clip): - key_map = comfy.lora.model_lora_keys_unet(model.model) - key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + key_map = {} + if model is not None: + key_map = comfy.lora.model_lora_keys_unet(model.model, key_map) + if clip is not None: + key_map = comfy.lora.model_lora_keys_clip(clip.cond_stage_model, key_map) + loaded = comfy.lora.load_lora(lora, key_map) - new_modelpatcher = model.clone() - k = new_modelpatcher.add_patches(loaded, strength_model) - new_clip = clip.clone() - k1 = new_clip.add_patches(loaded, strength_clip) + if model is not None: + new_modelpatcher = model.clone() + k = new_modelpatcher.add_patches(loaded, strength_model) + else: + k = () + new_modelpatcher = None + + if clip is not None: + new_clip = clip.clone() + k1 = new_clip.add_patches(loaded, strength_clip) + else: + k1 = () + new_clip = None k = set(k) k1 = set(k1) for x in loaded: From d2e27b48f169b0e5def6b9b2c7874e4010282921 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 1 Nov 2023 20:49:37 -0400 Subject: [PATCH 152/420] sampler_cfg_function now gets the noisy output as argument again. This should make things that use sampler_cfg_function behave like before. Added an input argument for those that want the denoised output. This means you can calculate the x0 prediction of the model by doing: (input - cond) for example. 
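A sketch of a custom callback under the restored convention (the argument
names match this diff; the callback body and the wiring line are
illustrative): cond and uncond arrive in noisy-output form (x - denoised)
and input is x, so input - cond recovers the conditioned x0 prediction.

    def my_cfg_function(args):
        cond, uncond, scale = args["cond"], args["uncond"], args["cond_scale"]
        x0_cond = args["input"] - cond  # denoised (x0) prediction, when needed
        return uncond + (cond - uncond) * scale  # plain CFG on the noisy outputs

    model_options["sampler_cfg_function"] = my_cfg_function  # hypothetical wiring

Since the sampler returns x - fn(args), this callback reproduces the default
uncond + (cond - uncond) * cond_scale result exactly.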
--- comfy/samplers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 92ba5f8ecee..22a9b68aefb 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -251,8 +251,8 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: - args = {"cond": cond, "uncond": uncond, "cond_scale": cond_scale, "timestep": timestep} - return model_options["sampler_cfg_function"](args) + args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x} + return x - model_options["sampler_cfg_function"](args) else: return uncond + (cond - uncond) * cond_scale From 6e84a01ecc31ea0ab5c83bf0698e4b5d4027955e Mon Sep 17 00:00:00 2001 From: Matteo Spinelli Date: Thu, 2 Nov 2023 17:29:57 +0100 Subject: [PATCH 153/420] Refactor the template manager (#1878) * add drag-drop to node template manager * better dnd, save field on change * actually save templates --------- Co-authored-by: matt3o --- web/extensions/core/nodeTemplates.js | 237 +++++++++++++++++---------- 1 file changed, 153 insertions(+), 84 deletions(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 434491075c3..b6479f454da 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -14,6 +14,9 @@ import { ComfyDialog, $el } from "../../scripts/ui.js"; // To delete/rename: // Right click the canvas // Node templates -> Manage +// +// To rearrange: +// Open the manage dialog and Drag and drop elements using the "Name:" label as handle const id = "Comfy.NodeTemplates"; @@ -22,6 +25,10 @@ class ManageTemplates extends ComfyDialog { super(); this.element.classList.add("comfy-manage-templates"); this.templates = this.load(); + this.draggedEl = null; + this.saveVisualCue = null; + this.emptyImg = new Image(); + this.emptyImg.src = 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs='; this.importInput = $el("input", { type: "file", @@ -35,14 +42,11 @@ class ManageTemplates extends ComfyDialog { createButtons() { const btns = super.createButtons(); - btns[0].textContent = "Cancel"; - btns.unshift( - $el("button", { - type: "button", - textContent: "Save", - onclick: () => this.save(), - }) - ); + btns[0].textContent = "Close"; + btns[0].onclick = (e) => { + clearTimeout(this.saveVisualCue); + this.close(); + }; btns.unshift( $el("button", { type: "button", @@ -71,25 +75,6 @@ class ManageTemplates extends ComfyDialog { } } - save() { - // Find all visible inputs and save them as our new list - const inputs = this.element.querySelectorAll("input"); - const updated = []; - - for (let i = 0; i < inputs.length; i++) { - const input = inputs[i]; - if (input.parentElement.style.display !== "none") { - const t = this.templates[i]; - t.name = input.value.trim() || input.getAttribute("data-name"); - updated.push(t); - } - } - - this.templates = updated; - this.store(); - this.close(); - } - store() { localStorage.setItem(id, JSON.stringify(this.templates)); } @@ -145,71 +130,155 @@ class ManageTemplates extends ComfyDialog { super.show( $el( "div", - { - style: { - display: "grid", - gridTemplateColumns: "1fr auto", - gap: "5px", - }, - }, - this.templates.flatMap((t) => { + {}, + this.templates.flatMap((t,i) => { let nameInput; return [ $el( - "label", + "div", { - textContent: "Name: ", + 
dataset: { id: i }, + className: "tempateManagerRow", + style: { + display: "grid", + gridTemplateColumns: "1fr auto", + border: "1px dashed transparent", + gap: "5px", + backgroundColor: "var(--comfy-menu-bg)" + }, + ondragstart: (e) => { + this.draggedEl = e.currentTarget; + e.currentTarget.style.opacity = "0.6"; + e.currentTarget.style.border = "1px dashed yellow"; + e.dataTransfer.effectAllowed = 'move'; + e.dataTransfer.setDragImage(this.emptyImg, 0, 0); + }, + ondragend: (e) => { + e.target.style.opacity = "1"; + e.currentTarget.style.border = "1px dashed transparent"; + e.currentTarget.removeAttribute("draggable"); + + // rearrange the elements in the localStorage + this.element.querySelectorAll('.tempateManagerRow').forEach((el,i) => { + var prev_i = el.dataset.id; + + if ( el == this.draggedEl && prev_i != i ) { + [this.templates[i], this.templates[prev_i]] = [this.templates[prev_i], this.templates[i]]; + } + el.dataset.id = i; + }); + this.store(); + }, + ondragover: (e) => { + e.preventDefault(); + if ( e.currentTarget == this.draggedEl ) + return; + + let rect = e.currentTarget.getBoundingClientRect(); + if (e.clientY > rect.top + rect.height / 2) { + e.currentTarget.parentNode.insertBefore(this.draggedEl, e.currentTarget.nextSibling); + } else { + e.currentTarget.parentNode.insertBefore(this.draggedEl, e.currentTarget); + } + } }, [ - $el("input", { - value: t.name, - dataset: { name: t.name }, - $: (el) => (nameInput = el), - }), - ] - ), - $el( - "div", - {}, - [ - $el("button", { - textContent: "Export", - style: { - fontSize: "12px", - fontWeight: "normal", - }, - onclick: (e) => { - const json = JSON.stringify({templates: [t]}, null, 2); // convert the data to a JSON string - const blob = new Blob([json], {type: "application/json"}); - const url = URL.createObjectURL(blob); - const a = $el("a", { - href: url, - download: (nameInput.value || t.name) + ".json", - style: {display: "none"}, - parent: document.body, - }); - a.click(); - setTimeout(function () { - a.remove(); - window.URL.revokeObjectURL(url); - }, 0); - }, - }), - $el("button", { - textContent: "Delete", - style: { - fontSize: "12px", - color: "red", - fontWeight: "normal", - }, - onclick: (e) => { - nameInput.value = ""; - e.target.parentElement.style.display = "none"; - e.target.parentElement.previousElementSibling.style.display = "none"; + $el( + "label", + { + textContent: "Name: ", + style: { + cursor: "grab", + }, + onmousedown: (e) => { + // enable dragging only from the label + if (e.target.localName == 'label') + e.currentTarget.parentNode.draggable = 'true'; + } }, - }), + [ + $el("input", { + value: t.name, + dataset: { name: t.name }, + style: { + transitionProperty: 'background-color', + transitionDuration: '0s', + }, + onchange: (e) => { + clearTimeout(this.saveVisualCue); + var el = e.target; + var row = el.parentNode.parentNode; + this.templates[row.dataset.id].name = el.value.trim() || 'untitled'; + this.store(); + el.style.backgroundColor = 'rgb(40, 95, 40)'; + el.style.transitionDuration = '0s'; + this.saveVisualCue = setTimeout(function () { + el.style.transitionDuration = '.7s'; + el.style.backgroundColor = 'var(--comfy-input-bg)'; + }, 15); + }, + onkeypress: (e) => { + var el = e.target; + clearTimeout(this.saveVisualCue); + el.style.transitionDuration = '0s'; + el.style.backgroundColor = 'var(--comfy-input-bg)'; + }, + $: (el) => (nameInput = el), + }) + ] + ), + $el( + "div", + {}, + [ + $el("button", { + textContent: "Export", + style: { + fontSize: "12px", + fontWeight: "normal", + 
}, + onclick: (e) => { + const json = JSON.stringify({templates: [t]}, null, 2); // convert the data to a JSON string + const blob = new Blob([json], {type: "application/json"}); + const url = URL.createObjectURL(blob); + const a = $el("a", { + href: url, + download: (nameInput.value || t.name) + ".json", + style: {display: "none"}, + parent: document.body, + }); + a.click(); + setTimeout(function () { + a.remove(); + window.URL.revokeObjectURL(url); + }, 0); + }, + }), + $el("button", { + textContent: "Delete", + style: { + fontSize: "12px", + color: "red", + fontWeight: "normal", + }, + onclick: (e) => { + const item = e.target.parentNode.parentNode; + item.parentNode.removeChild(item); + this.templates.splice(item.dataset.id*1, 1); + this.store(); + // update the rows index, setTimeout ensures that the list is updated + var that = this; + setTimeout(function (){ + that.element.querySelectorAll('.tempateManagerRow').forEach((el,i) => { + el.dataset.id = i; + }); + }, 0); + }, + }), + ] + ), ] - ), + ) ]; }) ) From ee74ef5c9ed9e9c8ecb967e6ce58ec74f664fd0c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 2 Nov 2023 13:07:41 -0400 Subject: [PATCH 154/420] Increase maximum batch size in LatentRebatch. --- comfy_extras/nodes_rebatch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_rebatch.py b/comfy_extras/nodes_rebatch.py index 0a9daf27276..88a4ebe29f6 100644 --- a/comfy_extras/nodes_rebatch.py +++ b/comfy_extras/nodes_rebatch.py @@ -4,7 +4,7 @@ class LatentRebatch: @classmethod def INPUT_TYPES(s): return {"required": { "latents": ("LATENT",), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), }} RETURN_TYPES = ("LATENT",) INPUT_IS_LIST = True From ae2acfc21b984ee780e0e1329a3c7b7189903501 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 3 Nov 2023 13:11:16 -0400 Subject: [PATCH 155/420] Don't convert Nan to zero. Converting Nan to zero is a bad idea because it makes it hard to tell when something went wrong. 
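A quick standalone illustration of the idea (dummy tensors, not ComfyUI code): priming the averaging counters with 1e-37 instead of zero keeps the final division finite everywhere, which is why the nan_to_num calls that used to paper over real failures can be dropped below.

    import torch

    out = torch.zeros(4)
    count = torch.ones(4) * 1e-37   # effectively zero, but never divides to NaN

    out[:2] += torch.tensor([2.0, 4.0])  # pretend only part of the area was denoised
    count[:2] += 1.0

    print(out / count)  # tensor([2., 4., 0., 0.]) -- finite without nan_to_num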
--- comfy/samplers.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 22a9b68aefb..964febb262e 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -136,10 +136,10 @@ def cond_cat(c_list): def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) - out_count = torch.zeros_like(x_in) + out_count = torch.ones_like(x_in) * 1e-37 out_uncond = torch.zeros_like(x_in) - out_uncond_count = torch.zeros_like(x_in) + out_uncond_count = torch.ones_like(x_in) * 1e-37 COND = 0 UNCOND = 1 @@ -239,9 +239,6 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot del out_count out_uncond /= out_uncond_count del out_uncond_count - - torch.nan_to_num(out_cond, nan=0.0, posinf=0.0, neginf=0.0, out=out_cond) #in case out_count or out_uncond_count had some zeros - torch.nan_to_num(out_uncond, nan=0.0, posinf=0.0, neginf=0.0, out=out_uncond) return out_cond, out_uncond From 1ffa8858e7e50cbe84180e0c455621e7db0fe7c0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 4 Nov 2023 01:32:23 -0400 Subject: [PATCH 156/420] Move model sampling code to comfy/model_sampling.py --- comfy/model_base.py | 77 +--------------------------------------- comfy/model_sampling.py | 78 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 76 deletions(-) create mode 100644 comfy/model_sampling.py diff --git a/comfy/model_base.py b/comfy/model_base.py index 41d464e523c..d1a95daad83 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1,11 +1,9 @@ import torch from comfy.ldm.modules.diffusionmodules.openaimodel import UNetModel from comfy.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation -from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule from comfy.ldm.modules.diffusionmodules.openaimodel import Timestep import comfy.model_management import comfy.conds -import numpy as np from enum import Enum from . 
import utils @@ -14,79 +12,7 @@ class ModelType(Enum): V_PREDICTION = 2 -#NOTE: all this sampling stuff will be moved -class EPS: - def calculate_input(self, sigma, noise): - sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) - return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - - def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) - return model_input - model_output * sigma - - -class V_PREDICTION(EPS): - def calculate_denoised(self, sigma, model_output, model_input): - sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) - return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 - - -class ModelSamplingDiscrete(torch.nn.Module): - def __init__(self, model_config=None): - super().__init__() - beta_schedule = "linear" - if model_config is not None: - beta_schedule = model_config.beta_schedule - self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) - self.sigma_data = 1.0 - - def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if given_betas is not None: - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - alphas = 1. - betas - alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32) - # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - - # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) - # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) - # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) - - sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 - - self.register_buffer('sigmas', sigmas) - self.register_buffer('log_sigmas', sigmas.log()) - - @property - def sigma_min(self): - return self.sigmas[0] - - @property - def sigma_max(self): - return self.sigmas[-1] - - def timestep(self, sigma): - log_sigma = sigma.log() - dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) - - def sigma(self, timestep): - t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) - low_idx = t.floor().long() - high_idx = t.ceil().long() - w = t.frac() - log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() - - def percent_to_sigma(self, percent): - return self.sigma(torch.tensor(percent * 999.0)) +from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete def model_sampling(model_config, model_type): if model_type == ModelType.EPS: @@ -102,7 +28,6 @@ class ModelSampling(s, c): return ModelSampling(model_config) - class BaseModel(torch.nn.Module): def __init__(self, model_config, model_type=ModelType.EPS, device=None): super().__init__() diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py new file mode 100644 index 00000000000..5e229323818 --- /dev/null +++ b/comfy/model_sampling.py @@ -0,0 +1,78 @@ +import torch +import numpy as np +from comfy.ldm.modules.diffusionmodules.util 
import make_beta_schedule + + +class EPS: + def calculate_input(self, sigma, noise): + sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1)) + return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input - model_output * sigma + + +class V_PREDICTION(EPS): + def calculate_denoised(self, sigma, model_output, model_input): + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5 + + +class ModelSamplingDiscrete(torch.nn.Module): + def __init__(self, model_config=None): + super().__init__() + beta_schedule = "linear" + if model_config is not None: + beta_schedule = model_config.beta_schedule + self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + self.sigma_data = 1.0 + + def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if given_betas is not None: + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + alphas = 1. - betas + alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32) + # alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + + # self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32)) + # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) + + sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 + + self.register_buffer('sigmas', sigmas) + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + log_sigma = sigma.log() + dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape) + + def sigma(self, timestep): + t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) + low_idx = t.floor().long() + high_idx = t.ceil().long() + w = t.frac() + log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] + return log_sigma.exp() + + def percent_to_sigma(self, percent): + return self.sigma(torch.tensor(percent * 999.0)) + From 7e455adc071974d178fbdd7dde616f48787f6c51 Mon Sep 17 00:00:00 2001 From: gameltb Date: Sun, 5 Nov 2023 17:11:44 +0800 Subject: [PATCH 157/420] fix unet_wrapper_function name in ModelPatcher --- comfy/model_patcher.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 50b725b8611..0efdf46e82f 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -107,10 +107,10 @@ def model_patches_to(self, device): for k in patch_list: if hasattr(patch_list[k], "to"): patch_list[k] = patch_list[k].to(device) - if "unet_wrapper_function" in self.model_options: - wrap_func = self.model_options["unet_wrapper_function"] + if 
"model_function_wrapper" in self.model_options: + wrap_func = self.model_options["model_function_wrapper"] if hasattr(wrap_func, "to"): - self.model_options["unet_wrapper_function"] = wrap_func.to(device) + self.model_options["model_function_wrapper"] = wrap_func.to(device) def model_dtype(self): if hasattr(self.model, "get_dtype"): From 02f062b5b7d8013e8d58a9c7e244aa8637b8062c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 5 Nov 2023 12:29:28 -0500 Subject: [PATCH 158/420] Sanitize unknown node types on load to prevent XSS. --- web/scripts/app.js | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index 583310a27c7..638afd56c5d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -5,6 +5,22 @@ import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; import { getPngMetadata, getWebpMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; + +function sanitizeNodeName(string) { + let entityMap = { + '&': '', + '<': '', + '>': '', + '"': '', + "'": '', + '`': '', + '=': '' + }; + return String(string).replace(/[&<>"'`=\/]/g, function fromEntityMap (s) { + return entityMap[s]; + }); +} + /** * @typedef {import("types/comfy").ComfyExtension} ComfyExtension */ @@ -1480,6 +1496,7 @@ export class ComfyApp { // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { + n.type = sanitizeNodeName(n.type); missingNodeTypes.push(n.type); } } From 4acfc11a802fad4e90103f9fd3cf73cb0c9b5ae1 Mon Sep 17 00:00:00 2001 From: matt3o Date: Sun, 5 Nov 2023 19:00:23 +0100 Subject: [PATCH 159/420] add difference blend mode --- comfy_extras/nodes_post_processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 324cfe105f2..12704f545d6 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -23,7 +23,7 @@ def INPUT_TYPES(s): "max": 1.0, "step": 0.01 }), - "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light"],), + "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light", "difference"],), }, } @@ -54,6 +54,8 @@ def blend_mode(self, img1, img2, mode): return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2)) elif mode == "soft_light": return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1)) + elif mode == "difference": + return img1 - img2 else: raise ValueError(f"Unsupported blend mode: {mode}") From b3fcd64c6c9c57a8a83ceeff3e6eb7121b122f08 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 6 Nov 2023 01:09:18 -0500 Subject: [PATCH 160/420] Make SDTokenizer class work with more types of tokenizers. 
--- comfy/sd1_clip.py | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index fdaa1e6c76e..4761230a6d0 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -343,17 +343,24 @@ def load_embed(embedding_name, embedding_directory, embedding_size, embed_key=No return embed_out class SDTokenizer: - def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l'): + def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True): if tokenizer_path is None: tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer") - self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path) + self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path) self.max_length = max_length - self.max_tokens_per_section = self.max_length - 2 empty = self.tokenizer('')["input_ids"] - self.start_token = empty[0] - self.end_token = empty[1] + if has_start_token: + self.tokens_start = 1 + self.start_token = empty[0] + self.end_token = empty[1] + else: + self.tokens_start = 0 + self.start_token = None + self.end_token = empty[0] self.pad_with_end = pad_with_end + self.pad_to_max_length = pad_to_max_length + vocab = self.tokenizer.get_vocab() self.inv_vocab = {v: k for k, v in vocab.items()} self.embedding_directory = embedding_directory @@ -414,11 +421,13 @@ def tokenize_with_weights(self, text:str, return_word_ids=False): else: continue #parse word - tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][1:-1]]) + tokens.append([(t, weight) for t in self.tokenizer(word)["input_ids"][self.tokens_start:-1]]) #reshape token array to CLIP input size batched_tokens = [] - batch = [(self.start_token, 1.0, 0)] + batch = [] + if self.start_token is not None: + batch.append((self.start_token, 1.0, 0)) batched_tokens.append(batch) for i, t_group in enumerate(tokens): #determine if we're going to try and keep the tokens in a single batch @@ -435,16 +444,21 @@ def tokenize_with_weights(self, text:str, return_word_ids=False): #add end token and pad else: batch.append((self.end_token, 1.0, 0)) - batch.extend([(pad_token, 1.0, 0)] * (remaining_length)) + if self.pad_to_max_length: + batch.extend([(pad_token, 1.0, 0)] * (remaining_length)) #start new batch - batch = [(self.start_token, 1.0, 0)] + batch = [] + if self.start_token is not None: + batch.append((self.start_token, 1.0, 0)) batched_tokens.append(batch) else: batch.extend([(t,w,i+1) for t,w in t_group]) t_group = [] #fill last batch - batch.extend([(self.end_token, 1.0, 0)] + [(pad_token, 1.0, 0)] * (self.max_length - len(batch) - 1)) + batch.append((self.end_token, 1.0, 0)) + if self.pad_to_max_length: + batch.extend([(pad_token, 1.0, 0)] * (self.max_length - len(batch))) if not return_word_ids: batched_tokens = [[(t, w) for t, w,_ in x] for x in batched_tokens] From 656c0b5d90239efb8be4281d2c16d52ca722064c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 6 Nov 2023 13:43:50 -0500 Subject: [PATCH 161/420] CLIP code refactor and improvements. More generic clip model class that can be used on more types of text encoders. Don't apply weighting algorithm when weight is 1.0 Don't compute an empty token output when it's not needed. 
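The weighting rule this commit optimizes is easiest to see in isolation. A standalone sketch with dummy tensors: each token embedding is pulled toward or away from the empty-prompt embedding by its weight, and when every weight is exactly 1.0 the empty-prompt encode is skipped entirely.

    import torch

    def apply_weights(z, z_empty, weights):
        if all(w == 1.0 for w in weights):
            return z                        # fast path: no empty encode needed
        w = torch.tensor(weights).unsqueeze(-1)
        return (z - z_empty) * w + z_empty  # weight 1.0 leaves a token unchanged

    z = torch.randn(3, 4)        # 3 tokens, 4-dim embeddings
    z_empty = torch.randn(3, 4)
    assert torch.equal(apply_weights(z, z_empty, [1.0, 1.0, 1.0]), z)
    out = apply_weights(z, z_empty, [1.0, 1.5, 0.5])
    assert torch.allclose(out[0], z[0])     # unweighted token stays untouched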
--- comfy/sd1_clip.py | 84 ++++++++++++++++++++++++++++++++-------------- comfy/sd2_clip.py | 3 +- comfy/sdxl_clip.py | 8 ++--- 3 files changed, 62 insertions(+), 33 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 4761230a6d0..7db7ee0f449 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -8,32 +8,54 @@ from . import model_management import contextlib +def gen_empty_tokens(special_tokens, length): + start_token = special_tokens.get("start", None) + end_token = special_tokens.get("end", None) + pad_token = special_tokens.get("pad") + output = [] + if start_token is not None: + output.append(start_token) + if end_token is not None: + output.append(end_token) + output += [pad_token] * (length - len(output)) + return output + class ClipTokenWeightEncoder: def encode_token_weights(self, token_weight_pairs): - to_encode = list(self.empty_tokens) + to_encode = list() + max_token_len = 0 + has_weights = False for x in token_weight_pairs: tokens = list(map(lambda a: a[0], x)) + max_token_len = max(len(tokens), max_token_len) + has_weights = has_weights or not all(map(lambda a: a[1] == 1.0, x)) to_encode.append(tokens) + sections = len(to_encode) + if has_weights or sections == 0: + to_encode.append(gen_empty_tokens(self.special_tokens, max_token_len)) + out, pooled = self.encode(to_encode) - z_empty = out[0:1] - if pooled.shape[0] > 1: - first_pooled = pooled[1:2] + if pooled is not None: + first_pooled = pooled[0:1].cpu() else: - first_pooled = pooled[0:1] + first_pooled = pooled output = [] - for k in range(1, out.shape[0]): + for k in range(0, sections): z = out[k:k+1] - for i in range(len(z)): - for j in range(len(z[i])): - weight = token_weight_pairs[k - 1][j][1] - z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j] + if has_weights: + z_empty = out[-1] + for i in range(len(z)): + for j in range(len(z[i])): + weight = token_weight_pairs[k][j][1] + if weight != 1.0: + z[i][j] = (z[i][j] - z_empty[j]) * weight + z_empty[j] output.append(z) if (len(output) == 0): - return z_empty.cpu(), first_pooled.cpu() - return torch.cat(output, dim=-2).cpu(), first_pooled.cpu() + return out[-1:].cpu(), first_pooled + return torch.cat(output, dim=-2).cpu(), first_pooled class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): """Uses the CLIP transformer encoder for text (from huggingface)""" @@ -43,37 +65,43 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): "hidden" ] def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77, - freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None): # clip-vit-base-patch32 + freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None, + special_tokens={"start": 49406, "end": 49407, "pad": 49407},layer_norm_hidden_state=True, config_class=CLIPTextConfig, + model_class=CLIPTextModel, inner_name="text_model"): # clip-vit-base-patch32 super().__init__() assert layer in self.LAYERS self.num_layers = 12 if textmodel_path is not None: - self.transformer = CLIPTextModel.from_pretrained(textmodel_path) + self.transformer = model_class.from_pretrained(textmodel_path) else: if textmodel_json_config is None: textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json") - config = CLIPTextConfig.from_json_file(textmodel_json_config) + config = config_class.from_json_file(textmodel_json_config) self.num_layers = config.num_hidden_layers with comfy.ops.use_comfy_ops(device, 
dtype): with modeling_utils.no_init_weights(): - self.transformer = CLIPTextModel(config) + self.transformer = model_class(config) + self.inner_name = inner_name if dtype is not None: self.transformer.to(dtype) - self.transformer.text_model.embeddings.token_embedding.to(torch.float32) - self.transformer.text_model.embeddings.position_embedding.to(torch.float32) + inner_model = getattr(self.transformer, self.inner_name) + if hasattr(inner_model, "embeddings"): + inner_model.embeddings.to(torch.float32) + else: + self.transformer.set_input_embeddings(self.transformer.get_input_embeddings().to(torch.float32)) self.max_length = max_length if freeze: self.freeze() self.layer = layer self.layer_idx = None - self.empty_tokens = [[49406] + [49407] * 76] + self.special_tokens = special_tokens self.text_projection = torch.nn.Parameter(torch.eye(self.transformer.get_input_embeddings().weight.shape[1])) self.logit_scale = torch.nn.Parameter(torch.tensor(4.6055)) self.enable_attention_masks = False - self.layer_norm_hidden_state = True + self.layer_norm_hidden_state = layer_norm_hidden_state if layer == "hidden": assert layer_idx is not None assert abs(layer_idx) <= self.num_layers @@ -117,7 +145,7 @@ def set_up_textual_embeddings(self, tokens, current_embeds): else: print("WARNING: shape mismatch when trying to apply embedding, embedding will be ignored", y.shape[0], current_embeds.weight.shape[1]) while len(tokens_temp) < len(x): - tokens_temp += [self.empty_tokens[0][-1]] + tokens_temp += [self.special_tokens["pad"]] out_tokens += [tokens_temp] n = token_dict_size @@ -142,7 +170,7 @@ def forward(self, tokens): tokens = self.set_up_textual_embeddings(tokens, backup_embeds) tokens = torch.LongTensor(tokens).to(device) - if self.transformer.text_model.final_layer_norm.weight.dtype != torch.float32: + if getattr(self.transformer, self.inner_name).final_layer_norm.weight.dtype != torch.float32: precision_scope = torch.autocast else: precision_scope = lambda a, b: contextlib.nullcontext(a) @@ -168,12 +196,16 @@ def forward(self, tokens): else: z = outputs.hidden_states[self.layer_idx] if self.layer_norm_hidden_state: - z = self.transformer.text_model.final_layer_norm(z) + z = getattr(self.transformer, self.inner_name).final_layer_norm(z) + + if hasattr(outputs, "pooler_output"): + pooled_output = outputs.pooler_output.float() + else: + pooled_output = None - pooled_output = outputs.pooler_output - if self.text_projection is not None: + if self.text_projection is not None and pooled_output is not None: pooled_output = pooled_output.float().to(self.text_projection.device) @ self.text_projection.float() - return z.float(), pooled_output.float() + return z.float(), pooled_output def encode(self, tokens): return self(tokens) diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index ebabf7ccd51..2ee0ca05586 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -9,8 +9,7 @@ def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, la layer_idx=23 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) - self.empty_tokens = [[49406] + [49407] + [0] * 75] + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype, special_tokens={"start": 49406, "end": 49407, 
"pad": 0}) class SD2ClipHTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None): diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index 4c508a0ea88..673399e2222 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -9,9 +9,8 @@ def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate" layer_idx=-2 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype) - self.empty_tokens = [[49406] + [49407] + [0] * 75] - self.layer_norm_hidden_state = False + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype, + special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False) def load_sd(self, sd): return super().load_sd(sd) @@ -38,8 +37,7 @@ def untokenize(self, token_weight_pair): class SDXLClipModel(torch.nn.Module): def __init__(self, device="cpu", dtype=None): super().__init__() - self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype) - self.clip_l.layer_norm_hidden_state = False + self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype, layer_norm_hidden_state=False) self.clip_g = SDXLClipG(device=device, dtype=dtype) def clip_layer(self, layer_idx): From 844dbf97a71b398301e1a6318c6776bc5b1f5b7e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 03:28:53 -0500 Subject: [PATCH 162/420] Add: advanced->model->ModelSamplingDiscrete node. This allows changing the sampling parameters of the model (eps or vpred) or set the model to use zsnr. 
--- comfy/model_patcher.py | 17 +++++++++ comfy/model_sampling.py | 2 + comfy_extras/nodes_model_advanced.py | 57 ++++++++++++++++++++++++++++ nodes.py | 1 + 4 files changed, 77 insertions(+) create mode 100644 comfy_extras/nodes_model_advanced.py diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 0efdf46e82f..0f5385597eb 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -11,6 +11,8 @@ def __init__(self, model, load_device, offload_device, size=0, current_device=No self.model = model self.patches = {} self.backup = {} + self.object_patches = {} + self.object_patches_backup = {} self.model_options = {"transformer_options":{}} self.model_size() self.load_device = load_device @@ -91,6 +93,9 @@ def set_model_attn2_output_patch(self, patch): def set_model_output_block_patch(self, patch): self.set_model_patch(patch, "output_block_patch") + def add_object_patch(self, name, obj): + self.object_patches[name] = obj + def model_patches_to(self, device): to = self.model_options["transformer_options"] if "patches" in to: @@ -150,6 +155,12 @@ def model_state_dict(self, filter_prefix=None): return sd def patch_model(self, device_to=None): + for k in self.object_patches: + old = getattr(self.model, k) + if k not in self.object_patches_backup: + self.object_patches_backup[k] = old + setattr(self.model, k, self.object_patches[k]) + model_sd = self.model_state_dict() for key in self.patches: if key not in model_sd: @@ -290,3 +301,9 @@ def unpatch_model(self, device_to=None): if device_to is not None: self.model.to(device_to) self.current_device = device_to + + keys = list(self.object_patches_backup.keys()) + for k in keys: + setattr(self.model, k, self.object_patches_backup[k]) + + self.object_patches_backup = {} diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index 5e229323818..a2935d47d18 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -48,7 +48,9 @@ def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps # self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32)) sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5 + self.set_sigmas(sigmas) + def set_sigmas(self, sigmas): self.register_buffer('sigmas', sigmas) self.register_buffer('log_sigmas', sigmas.log()) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py new file mode 100644 index 00000000000..c02cfb05a26 --- /dev/null +++ b/comfy_extras/nodes_model_advanced.py @@ -0,0 +1,57 @@ +import folder_paths +import comfy.sd +import comfy.model_sampling + + +def rescale_zero_terminal_snr_sigmas(sigmas): + alphas_cumprod = 1 / ((sigmas * sigmas) + 1) + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. 
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return ((1 - alphas_bar) / alphas_bar) ** 0.5 + +class ModelSamplingDiscrete: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "sampling": (["eps", "v_prediction"],), + "zsnr": ("BOOLEAN", {"default": False}), + }} + + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "advanced/model" + + def patch(self, model, sampling, zsnr): + m = model.clone() + + if sampling == "eps": + sampling_type = comfy.model_sampling.EPS + elif sampling == "v_prediction": + sampling_type = comfy.model_sampling.V_PREDICTION + + class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingDiscrete, sampling_type): + pass + + model_sampling = ModelSamplingAdvanced() + if zsnr: + model_sampling.set_sigmas(rescale_zero_terminal_snr_sigmas(model_sampling.sigmas)) + m.add_object_patch("model_sampling", model_sampling) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "ModelSamplingDiscrete": ModelSamplingDiscrete, +} diff --git a/nodes.py b/nodes.py index 61ebbb8b49e..5ed015442ab 100644 --- a/nodes.py +++ b/nodes.py @@ -1798,6 +1798,7 @@ def init_custom_nodes(): "nodes_freelunch.py", "nodes_custom_sampler.py", "nodes_hypertile.py", + "nodes_model_advanced.py", ] for node_file in extras_files: From 2a23ba0b8c225b59902423ef08db0de39d2ed7e7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 04:30:37 -0500 Subject: [PATCH 163/420] Fix unet ops not entirely on GPU. --- comfy/ldm/modules/diffusionmodules/util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index d890c8044aa..0298ca99d4d 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -170,8 +170,8 @@ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): if not repeat_only: half = dim // 2 freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half + ) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) if dim % 2: From a527d0c795ba5572708095fcf0f9366e2076ba7e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 19:33:40 -0500 Subject: [PATCH 164/420] Code refactor. 
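The refactor collapses three near-identical "pop a ControlNet residual and add it if present" blocks into a single helper. The helper is small enough to exercise standalone, with dummy tensors standing in for UNet activations:

    import torch

    def apply_control(h, control, name):
        if control is not None and name in control and len(control[name]) > 0:
            ctrl = control[name].pop()
            if ctrl is not None:
                h += ctrl
        return h

    h = torch.zeros(2)
    control = {"input": [torch.ones(2)], "middle": [None]}
    h = apply_control(h, control, "input")   # residual added -> tensor([1., 1.])
    h = apply_control(h, control, "middle")  # a None entry is skipped safely
    h = apply_control(h, control, "output")  # a missing key is a no-op
    print(h)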
--- .../modules/diffusionmodules/openaimodel.py | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 7dfdfc0a29c..6c2113e3e4f 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -251,6 +251,12 @@ def __init__(self, dim): def forward(self, t): return timestep_embedding(t, self.dim) +def apply_control(h, control, name): + if control is not None and name in control and len(control[name]) > 0: + ctrl = control[name].pop() + if ctrl is not None: + h += ctrl + return h class UNetModel(nn.Module): """ @@ -617,25 +623,17 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo for id, module in enumerate(self.input_blocks): transformer_options["block"] = ("input", id) h = forward_timestep_embed(module, h, emb, context, transformer_options) - if control is not None and 'input' in control and len(control['input']) > 0: - ctrl = control['input'].pop() - if ctrl is not None: - h += ctrl + h = apply_control(h, control, 'input') hs.append(h) + transformer_options["block"] = ("middle", 0) h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) - if control is not None and 'middle' in control and len(control['middle']) > 0: - ctrl = control['middle'].pop() - if ctrl is not None: - h += ctrl + h = apply_control(h, control, 'middle') for id, module in enumerate(self.output_blocks): transformer_options["block"] = ("output", id) hsp = hs.pop() - if control is not None and 'output' in control and len(control['output']) > 0: - ctrl = control['output'].pop() - if ctrl is not None: - hsp += ctrl + h = apply_control(h, control, 'output') if "output_block_patch" in transformer_patches: patch = transformer_patches["output_block_patch"] From fe40109b57bc3cf3c79c98f34c06041bf917f22f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 22:15:15 -0500 Subject: [PATCH 165/420] Fix issue with object patches not being copied with patcher. --- comfy/model_patcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 0f5385597eb..55800e86ea4 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -40,6 +40,7 @@ def clone(self): for k in self.patches: n.patches[k] = self.patches[k][:] + n.object_patches = self.object_patches.copy() n.model_options = copy.deepcopy(self.model_options) n.model_keys = self.model_keys return n From 0a6fd49a3ef730741fc5f43ca89f3fadd3401129 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 22:15:55 -0500 Subject: [PATCH 166/420] Print leftover keys when using the UNETLoader. 
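In other words: after the recognized keys have been popped out of the source state dict during conversion, anything left over was not understood and is now reported instead of silently dropped. A toy sketch with invented key names:

    sd = {"model.diffusion_model.in.weight": 1, "first_stage_model.dec.weight": 2}
    prefix = "model.diffusion_model."

    new_sd = {}
    for k in list(sd):
        if k.startswith(prefix):
            new_sd[k[len(prefix):]] = sd.pop(k)

    left_over = sd.keys()
    if len(left_over) > 0:
        print("left over keys in unet:", left_over)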
--- comfy/sd.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index 65a61343be1..65d94f46ecc 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -496,6 +496,9 @@ def load_unet(unet_path): #load unet in diffusers format model = model_config.get_model(new_sd, "") model = model.to(offload_device) model.load_model_weights(new_sd, "") + left_over = sd.keys() + if len(left_over) > 0: + print("left over keys in unet:", left_over) return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device) def save_checkpoint(output_path, model, clip, vae, metadata=None): From 794dd2064d82988fd63250f3e79b226cfdbc4e93 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 7 Nov 2023 23:41:55 -0500 Subject: [PATCH 167/420] Fix typo. --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 6c2113e3e4f..49c1e8cbb5a 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -633,7 +633,7 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo for id, module in enumerate(self.output_blocks): transformer_options["block"] = ("output", id) hsp = hs.pop() - h = apply_control(h, control, 'output') + hsp = apply_control(hsp, control, 'output') if "output_block_patch" in transformer_patches: patch = transformer_patches["output_block_patch"] From 064d7583ebc0d6f9c0c4d28da76717d99230a64d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 8 Nov 2023 01:59:09 -0500 Subject: [PATCH 168/420] Add a CONDConstant for passing non tensor conds to unet. --- comfy/conds.py | 15 +++++++++++++++ comfy/model_base.py | 5 ++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/comfy/conds.py b/comfy/conds.py index 1e3111baff8..6cff2518400 100644 --- a/comfy/conds.py +++ b/comfy/conds.py @@ -62,3 +62,18 @@ def concat(self, others): c = c.repeat(1, crossattn_max_len // c.shape[1], 1) #padding with repeat doesn't change result out.append(c) return torch.cat(out) + +class CONDConstant(CONDRegular): + def __init__(self, cond): + self.cond = cond + + def process_cond(self, batch_size, device, **kwargs): + return self._copy_with(self.cond) + + def can_concat(self, other): + if self.cond != other.cond: + return False + return True + + def concat(self, others): + return self.cond diff --git a/comfy/model_base.py b/comfy/model_base.py index d1a95daad83..7ba253470f4 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -61,7 +61,10 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, trans context = context.to(dtype) extra_conds = {} for o in kwargs: - extra_conds[o] = kwargs[o].to(dtype) + extra = kwargs[o] + if hasattr(extra, "to"): + extra = extra.to(dtype) + extra_conds[o] = extra model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() return self.model_sampling.calculate_denoised(sigma, model_output, x) From ec120001363271ca039c8e07dabd8837df6498cd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 8 Nov 2023 22:05:31 -0500 Subject: [PATCH 169/420] Add support for full diff lora keys. 
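Unlike standard low-rank LoRA pairs, the "diff"/"diff_b" keys handled below store a full-resolution delta that is simply added onto the base weight. A standalone sketch of the arithmetic (dummy tensors and invented key names, not ComfyUI's patch pipeline):

    import torch

    lora = {
        "model.layer.diff":   torch.full((4, 4), 0.10),  # full weight delta
        "model.layer.diff_b": torch.full((4,),   0.50),  # full bias delta
    }

    weight = torch.zeros(4, 4)
    bias = torch.zeros(4)
    strength = 0.8

    weight += strength * lora["model.layer.diff"]
    bias += strength * lora["model.layer.diff_b"]
    print(weight[0], bias)  # rows of 0.08, bias of 0.4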
--- comfy/lora.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index d4cf94c9599..29c59d89307 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -131,6 +131,18 @@ def load_lora(lora, to_load): loaded_keys.add(b_norm_name) patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (b_norm,) + diff_name = "{}.diff".format(x) + diff_weight = lora.get(diff_name, None) + if diff_weight is not None: + patch_dict[to_load[x]] = (diff_weight,) + loaded_keys.add(diff_name) + + diff_bias_name = "{}.diff_b".format(x) + diff_bias = lora.get(diff_bias_name, None) + if diff_bias is not None: + patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (diff_bias,) + loaded_keys.add(diff_bias_name) + for x in lora.keys(): if x not in loaded_keys: print("lora key not loaded", x) From cd6df8b323d4d7d32730f4460f76795dd9b8ca60 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 9 Nov 2023 13:10:19 -0500 Subject: [PATCH 170/420] Fix sanitize node name removing the "/" character. --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 638afd56c5d..50e20522202 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -16,7 +16,7 @@ function sanitizeNodeName(string) { '`': '', '=': '' }; - return String(string).replace(/[&<>"'`=\/]/g, function fromEntityMap (s) { + return String(string).replace(/[&<>"'`=]/g, function fromEntityMap (s) { return entityMap[s]; }); } From 72e3feb5735adc7b968c2bc8d0b5cd5e9bea9c59 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 9 Nov 2023 18:33:43 +0000 Subject: [PATCH 171/420] Load API JSON (#1932) * added loading api json * revert async change * reorder --- web/scripts/app.js | 69 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 60 insertions(+), 9 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 50e20522202..61b88d44b85 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1469,6 +1469,17 @@ export class ComfyApp { localStorage.setItem("litegrapheditor_clipboard", old); } + showMissingNodesError(missingNodeTypes, hasAddedNodes = true) { + this.ui.dialog.show( + `When loading the graph, the following node types were not found:
<ul>${Array.from(new Set(missingNodeTypes)).map( + (t) => `<li>${t}</li>` + ).join("")}</ul>
${hasAddedNodes ? "Nodes that have failed to load will show as red on the graph." : ""}` + ); + this.logging.addEntry("Comfy.App", "warn", { + MissingNodes: missingNodeTypes, + }); + } + /** * Populates the graph with the specified workflow data * @param {*} graphData A serialized graph object @@ -1587,14 +1598,7 @@ export class ComfyApp { } if (missingNodeTypes.length) { - this.ui.dialog.show( - `When loading the graph, the following node types were not found:
<ul>${Array.from(new Set(missingNodeTypes)).map( - (t) => `<li>${t}</li>` - ).join("")}</ul>
Nodes that have failed to load will show as red on the graph.` - ); - this.logging.addEntry("Comfy.App", "warn", { - MissingNodes: missingNodeTypes, - }); + this.showMissingNodesError(missingNodeTypes); } } @@ -1825,9 +1829,11 @@ export class ComfyApp { } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); reader.onload = () => { - var jsonContent = JSON.parse(reader.result); + const jsonContent = JSON.parse(reader.result); if (jsonContent?.templates) { this.loadTemplateData(jsonContent); + } else if(this.isApiJson(jsonContent)) { + this.loadApiJson(jsonContent); } else { this.loadGraphData(jsonContent); } @@ -1841,6 +1847,51 @@ export class ComfyApp { } } + isApiJson(data) { + return Object.values(data).every((v) => v.class_type); + } + + loadApiJson(apiData) { + const missingNodeTypes = Object.values(apiData).filter((n) => !LiteGraph.registered_node_types[n.class_type]); + if (missingNodeTypes.length) { + this.showMissingNodesError(missingNodeTypes.map(t => t.class_type), false); + return; + } + + const ids = Object.keys(apiData); + app.graph.clear(); + for (const id of ids) { + const data = apiData[id]; + const node = LiteGraph.createNode(data.class_type); + node.id = id; + graph.add(node); + } + + for (const id of ids) { + const data = apiData[id]; + const node = app.graph.getNodeById(id); + for (const input in data.inputs ?? {}) { + const value = data.inputs[input]; + if (value instanceof Array) { + const [fromId, fromSlot] = value; + const fromNode = app.graph.getNodeById(fromId); + const toSlot = node.inputs?.findIndex((inp) => inp.name === input); + if (toSlot !== -1) { + fromNode.connect(fromSlot, node, toSlot); + } + } else { + const widget = node.widgets?.find((w) => w.name === input); + if (widget) { + widget.value = value; + widget.callback?.(value); + } + } + } + } + + app.graph.arrange(); + } + /** * Registers a Comfy web extension with the app * @param {ComfyExtension} extension From ca71e542d2123cc58ef9c5884f67a6a211e4c41a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 9 Nov 2023 17:35:17 -0500 Subject: [PATCH 172/420] Lower cfg step to 0.1 in sampler nodes. 
--- comfy_extras/nodes_custom_sampler.py | 2 +- nodes.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index b52ad8fbd70..154ecd0d234 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -188,7 +188,7 @@ def INPUT_TYPES(s): {"model": ("MODEL",), "add_noise": ("BOOLEAN", {"default": True}), "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "positive": ("CONDITIONING", ), "negative": ("CONDITIONING", ), "sampler": ("SAMPLER", ), diff --git a/nodes.py b/nodes.py index 5ed015442ab..2bbfd8fe874 100644 --- a/nodes.py +++ b/nodes.py @@ -1218,7 +1218,7 @@ def INPUT_TYPES(s): {"model": ("MODEL",), "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), "positive": ("CONDITIONING", ), @@ -1244,7 +1244,7 @@ def INPUT_TYPES(s): "add_noise": (["enable", "disable"], ), "noise_seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), - "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ), "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ), "positive": ("CONDITIONING", ), From 002aefa382585d171aef13c7bd21f64b8664fe28 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 9 Nov 2023 17:57:51 -0500 Subject: [PATCH 173/420] Support lcm models. Use the "lcm" sampler to sample them, you also have to use the ModelSamplingDiscrete node to set them as lcm models to use them properly. --- comfy/k_diffusion/sampling.py | 15 +++++- comfy/samplers.py | 2 +- comfy_extras/nodes_model_advanced.py | 75 +++++++++++++++++++++++++++- 3 files changed, 88 insertions(+), 4 deletions(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index 937c5a3881d..dd6f7bbe598 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -717,7 +717,6 @@ def DDPMSampler_step(x, sigma, sigma_prev, noise, noise_sampler): mu += ((1 - alpha) * (1. - alpha_cumprod_prev) / (1. 
- alpha_cumprod)).sqrt() * noise_sampler(sigma, sigma_prev) return mu - def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None, step_function=None): extra_args = {} if extra_args is None else extra_args noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler @@ -737,3 +736,17 @@ def generic_step_sampler(model, x, sigmas, extra_args=None, callback=None, disab def sample_ddpm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None): return generic_step_sampler(model, x, sigmas, extra_args, callback, disable, noise_sampler, DDPMSampler_step) +@torch.no_grad() +def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None): + extra_args = {} if extra_args is None else extra_args + noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler + s_in = x.new_ones([x.shape[0]]) + for i in trange(len(sigmas) - 1, disable=disable): + denoised = model(x, sigmas[i] * s_in, **extra_args) + if callback is not None: + callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised}) + + x = denoised + if sigmas[i + 1] > 0: + x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1]) + return x diff --git a/comfy/samplers.py b/comfy/samplers.py index 964febb262e..d7ff8985044 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -519,7 +519,7 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", - "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm"] + "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"] def ksampler(sampler_name, extra_options={}, inpaint_options={}): class KSAMPLER(Sampler): diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index c02cfb05a26..42596fbd52d 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -1,6 +1,72 @@ import folder_paths import comfy.sd import comfy.model_sampling +import torch + +class LCM(comfy.model_sampling.EPS): + def calculate_denoised(self, sigma, model_output, model_input): + timestep = self.timestep(sigma).view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1)) + x0 = model_input - model_output * sigma + + sigma_data = 0.5 + scaled_timestep = timestep * 10.0 #timestep_scaling + + c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2) + c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5 + + return c_out * x0 + c_skip * model_input + +class ModelSamplingDiscreteLCM(torch.nn.Module): + def __init__(self): + super().__init__() + self.sigma_data = 1.0 + timesteps = 1000 + beta_start = 0.00085 + beta_end = 0.012 + + betas = torch.linspace(beta_start**0.5, beta_end**0.5, timesteps, dtype=torch.float32) ** 2 + alphas = 1.0 - betas + alphas_cumprod = torch.cumprod(alphas, dim=0) + + original_timesteps = 50 + self.skip_steps = timesteps // original_timesteps + + + alphas_cumprod_valid = torch.zeros((original_timesteps), dtype=torch.float32) + for x in range(original_timesteps): + alphas_cumprod_valid[original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps] + + sigmas = ((1 - 
alphas_cumprod_valid) / alphas_cumprod_valid) ** 0.5 + self.set_sigmas(sigmas) + + def set_sigmas(self, sigmas): + self.register_buffer('sigmas', sigmas) + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + log_sigma = sigma.log() + dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] + return dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1) + + def sigma(self, timestep): + t = torch.clamp(((timestep - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1)) + low_idx = t.floor().long() + high_idx = t.ceil().long() + w = t.frac() + log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] + return log_sigma.exp() + + def percent_to_sigma(self, percent): + return self.sigma(torch.tensor(percent * 999.0)) def rescale_zero_terminal_snr_sigmas(sigmas): @@ -26,7 +92,7 @@ class ModelSamplingDiscrete: @classmethod def INPUT_TYPES(s): return {"required": { "model": ("MODEL",), - "sampling": (["eps", "v_prediction"],), + "sampling": (["eps", "v_prediction", "lcm"],), "zsnr": ("BOOLEAN", {"default": False}), }} @@ -38,17 +104,22 @@ def INPUT_TYPES(s): def patch(self, model, sampling, zsnr): m = model.clone() + sampling_base = comfy.model_sampling.ModelSamplingDiscrete if sampling == "eps": sampling_type = comfy.model_sampling.EPS elif sampling == "v_prediction": sampling_type = comfy.model_sampling.V_PREDICTION + elif sampling == "lcm": + sampling_type = LCM + sampling_base = ModelSamplingDiscreteLCM - class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingDiscrete, sampling_type): + class ModelSamplingAdvanced(sampling_base, sampling_type): pass model_sampling = ModelSamplingAdvanced() if zsnr: model_sampling.set_sigmas(rescale_zero_terminal_snr_sigmas(model_sampling.sigmas)) + m.add_object_patch("model_sampling", model_sampling) return (m, ) From 3e0033ef30a111076af54a7a4e6b470cdc570886 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 10 Nov 2023 03:19:05 -0500 Subject: [PATCH 174/420] Fix model merge bug. Unload models before getting weights for model patching. --- comfy/model_patcher.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 55800e86ea4..ef18d1b2342 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -134,6 +134,7 @@ def add_patches(self, patches, strength_patch=1.0, strength_model=1.0): return list(p) def get_key_patches(self, filter_prefix=None): + comfy.model_management.unload_model_clones(self) model_sd = self.model_state_dict() p = {} for k in model_sd: From 58d5d71a93908c6edd783d85557c2556b2e179c7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 10 Nov 2023 20:52:10 -0500 Subject: [PATCH 175/420] Working RescaleCFG node. This was broken because of recent changes so I fixed it and moved it from the experiments repo. 
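The node's core math, shown standalone on dummy tensors (the real node below additionally maps the model output into v-prediction space first): the CFG result is renormalized so each sample's standard deviation matches that of the positive prediction, then blended with the raw CFG output by the multiplier.

    import torch

    def rescale_cfg(cond, uncond, cond_scale=7.5, multiplier=0.7):
        x_cfg = uncond + cond_scale * (cond - uncond)    # plain CFG
        ro_pos = torch.std(cond, dim=(1, 2, 3), keepdim=True)
        ro_cfg = torch.std(x_cfg, dim=(1, 2, 3), keepdim=True)
        x_rescaled = x_cfg * (ro_pos / ro_cfg)           # match cond's std
        return multiplier * x_rescaled + (1.0 - multiplier) * x_cfg

    cond = torch.randn(2, 4, 8, 8)    # batch of 2: statistics stay per-sample
    uncond = torch.randn(2, 4, 8, 8)
    out = rescale_cfg(cond, uncond)
    print(out.shape, torch.std(out, dim=(1, 2, 3)))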
--- comfy/samplers.py | 2 +- comfy_extras/nodes_model_advanced.py | 39 ++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index d7ff8985044..a839ee9e2a2 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -248,7 +248,7 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: - args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x} + args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} return x - model_options["sampler_cfg_function"](args) else: return uncond + (cond - uncond) * cond_scale diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 42596fbd52d..09d2d9072b2 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -123,6 +123,45 @@ class ModelSamplingAdvanced(sampling_base, sampling_type): m.add_object_patch("model_sampling", model_sampling) return (m, ) +class RescaleCFG: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "multiplier": ("FLOAT", {"default": 0.7, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "advanced/model" + + def patch(self, model, multiplier): + def rescale_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + sigma = args["sigma"] + x_orig = args["input"] + + #rescale cfg has to be done on v-pred model output + x = x_orig / (sigma * sigma + 1.0) + cond = ((x - (x_orig - cond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma) + uncond = ((x - (x_orig - uncond)) * (sigma ** 2 + 1.0) ** 0.5) / (sigma) + + #rescalecfg + x_cfg = uncond + cond_scale * (cond - uncond) + ro_pos = torch.std(cond, dim=(1,2,3), keepdim=True) + ro_cfg = torch.std(x_cfg, dim=(1,2,3), keepdim=True) + + x_rescaled = x_cfg * (ro_pos / ro_cfg) + x_final = multiplier * x_rescaled + (1.0 - multiplier) * x_cfg + + return x_orig - (x - x_final * sigma / (sigma * sigma + 1.0) ** 0.5) + + m = model.clone() + m.set_model_sampler_cfg_function(rescale_cfg) + return (m, ) + NODE_CLASS_MAPPINGS = { "ModelSamplingDiscrete": ModelSamplingDiscrete, + "RescaleCFG": RescaleCFG, } From ca2812bae09f337378dc1d70714bf7287e27883a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 10 Nov 2023 22:05:25 -0500 Subject: [PATCH 176/420] Fix RescaleCFG for batch size > 1. --- comfy_extras/nodes_model_advanced.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 09d2d9072b2..399123eaa2e 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -140,6 +140,7 @@ def rescale_cfg(args): uncond = args["uncond"] cond_scale = args["cond_scale"] sigma = args["sigma"] + sigma = sigma.view(sigma.shape[:1] + (1,) * (cond.ndim - 1)) x_orig = args["input"] #rescale cfg has to be done on v-pred model output From 412d3ff57d01d7e8c0889f686e31836170c4bfe3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 11 Nov 2023 01:00:43 -0500 Subject: [PATCH 177/420] Refactor. 
--- comfy/ops.py | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/comfy/ops.py b/comfy/ops.py index 610d54584fa..0bfb698aa7f 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -1,29 +1,23 @@ import torch from contextlib import contextmanager -class Linear(torch.nn.Module): - def __init__(self, in_features: int, out_features: int, bias: bool = True, - device=None, dtype=None) -> None: - factory_kwargs = {'device': device, 'dtype': dtype} - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.weight = torch.nn.Parameter(torch.empty((out_features, in_features), **factory_kwargs)) - if bias: - self.bias = torch.nn.Parameter(torch.empty(out_features, **factory_kwargs)) - else: - self.register_parameter('bias', None) - - def forward(self, input): - return torch.nn.functional.linear(input, self.weight, self.bias) +class Linear(torch.nn.Linear): + def reset_parameters(self): + return None class Conv2d(torch.nn.Conv2d): def reset_parameters(self): return None +class Conv3d(torch.nn.Conv3d): + def reset_parameters(self): + return None + def conv_nd(dims, *args, **kwargs): if dims == 2: return Conv2d(*args, **kwargs) + elif dims == 3: + return Conv3d(*args, **kwargs) else: raise ValueError(f"unsupported dimensions: {dims}") From 4a8a839b40fcae9960a6107200b89dce6675895d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 11 Nov 2023 01:03:39 -0500 Subject: [PATCH 178/420] Add option to use in place weight updating in ModelPatcher. --- comfy/model_patcher.py | 21 ++++++++++++++++----- comfy/utils.py | 8 ++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index ef18d1b2342..6d7a61c416a 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -6,7 +6,7 @@ import comfy.model_management class ModelPatcher: - def __init__(self, model, load_device, offload_device, size=0, current_device=None): + def __init__(self, model, load_device, offload_device, size=0, current_device=None, weight_inplace_update=False): self.size = size self.model = model self.patches = {} @@ -22,6 +22,8 @@ def __init__(self, model, load_device, offload_device, size=0, current_device=No else: self.current_device = current_device + self.weight_inplace_update = weight_inplace_update + def model_size(self): if self.size > 0: return self.size @@ -171,15 +173,20 @@ def patch_model(self, device_to=None): weight = model_sd[key] + inplace_update = self.weight_inplace_update + if key not in self.backup: - self.backup[key] = weight.to(self.offload_device) + self.backup[key] = weight.to(device=device_to, copy=inplace_update) if device_to is not None: temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) else: temp_weight = weight.to(torch.float32, copy=True) out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype) - comfy.utils.set_attr(self.model, key, out_weight) + if inplace_update: + comfy.utils.copy_to_param(self.model, key, out_weight) + else: + comfy.utils.set_attr(self.model, key, out_weight) del temp_weight if device_to is not None: @@ -295,8 +302,12 @@ def calculate_weight(self, patches, weight, key): def unpatch_model(self, device_to=None): keys = list(self.backup.keys()) - for k in keys: - comfy.utils.set_attr(self.model, k, self.backup[k]) + if self.weight_inplace_update: + for k in keys: + comfy.utils.copy_to_param(self.model, k, self.backup[k]) + else: + for k in keys: + 
comfy.utils.set_attr(self.model, k, self.backup[k]) self.backup = {} diff --git a/comfy/utils.py b/comfy/utils.py index 6a0c54e8098..4b484d07ac9 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -261,6 +261,14 @@ def set_attr(obj, attr, value): setattr(obj, attrs[-1], torch.nn.Parameter(value)) del prev +def copy_to_param(obj, attr, value): + # inplace update tensor instead of replacing it + attrs = attr.split(".") + for name in attrs[:-1]: + obj = getattr(obj, name) + prev = getattr(obj, attrs[-1]) + prev.data.copy_(value) + def get_attr(obj, attr): attrs = attr.split(".") for name in attrs: From 248aa3e56355d75ac3d8632af769e6c700d9bfac Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 11 Nov 2023 12:20:16 -0500 Subject: [PATCH 179/420] Fix bug. --- comfy/model_patcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 6d7a61c416a..9dc09791add 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -176,7 +176,7 @@ def patch_model(self, device_to=None): inplace_update = self.weight_inplace_update if key not in self.backup: - self.backup[key] = weight.to(device=device_to, copy=inplace_update) + self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update) if device_to is not None: temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) From 006b24cc328977005a866a04f418a99dd76f2c4d Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Sat, 11 Nov 2023 15:56:14 -0300 Subject: [PATCH 180/420] Prevent image cache --- web/extensions/core/maskeditor.js | 4 ++-- web/scripts/app.js | 6 +++++- web/scripts/widgets.js | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/web/extensions/core/maskeditor.js b/web/extensions/core/maskeditor.js index f6292b9e378..8ace79562e4 100644 --- a/web/extensions/core/maskeditor.js +++ b/web/extensions/core/maskeditor.js @@ -42,7 +42,7 @@ async function uploadMask(filepath, formData) { }); ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']] = new Image(); - ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = api.apiURL("/view?" + new URLSearchParams(filepath).toString() + app.getPreviewFormatParam()); + ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src = api.apiURL("/view?" + new URLSearchParams(filepath).toString() + app.getPreviewFormatParam() + app.getRandParam()); if(ComfyApp.clipspace.images) ComfyApp.clipspace.images[ComfyApp.clipspace['selectedIndex']] = filepath; @@ -657,4 +657,4 @@ app.registerExtension({ const context_predicate = () => ComfyApp.clipspace && ComfyApp.clipspace.imgs && ComfyApp.clipspace.imgs.length > 0 ClipspaceDialog.registerButton("MaskEditor", context_predicate, ComfyApp.open_maskeditor); } -}); \ No newline at end of file +}); diff --git a/web/scripts/app.js b/web/scripts/app.js index 61b88d44b85..2fa6c7f88e1 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -83,6 +83,10 @@ export class ComfyApp { return ""; } + getRandParam() { + return "&rand=" + Math.random(); + } + static isImageNode(node) { return node.imgs || (node && node.widgets && node.widgets.findIndex(obj => obj.name === 'image') >= 0); } @@ -427,7 +431,7 @@ export class ComfyApp { this.images = output.images; imagesChanged = true; imgURLs = imgURLs.concat(output.images.map(params => { - return api.apiURL("/view?" + new URLSearchParams(params).toString() + app.getPreviewFormatParam()); + return api.apiURL("/view?" 
+ new URLSearchParams(params).toString() + app.getPreviewFormatParam() + app.getRandParam()); })) } } diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 2b674776937..88c0b07ee93 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -356,7 +356,7 @@ export const ComfyWidgets = { subfolder = name.substring(0, folder_separator); name = name.substring(folder_separator + 1); } - img.src = api.apiURL(`/view?filename=${encodeURIComponent(name)}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`); + img.src = api.apiURL(`/view?filename=${encodeURIComponent(name)}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}${app.getRandParam()}`); node.setSizeForImage?.(); } From 8d80584f6a2797268b9b57ec84d6c76e8a27891c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 01:25:33 -0500 Subject: [PATCH 181/420] Remove useless argument from uni_pc sampler. --- comfy/extra_samplers/uni_pc.py | 2 +- comfy/samplers.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/extra_samplers/uni_pc.py b/comfy/extra_samplers/uni_pc.py index 1a7a8392902..08bf0fc9e67 100644 --- a/comfy/extra_samplers/uni_pc.py +++ b/comfy/extra_samplers/uni_pc.py @@ -858,7 +858,7 @@ def predict_eps_sigma(model, input, sigma_in, **kwargs): return (input - model(input, sigma_in, **kwargs)) / sigma -def sample_unipc(model, noise, image, sigmas, sampling_function, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): +def sample_unipc(model, noise, image, sigmas, max_denoise, extra_args=None, callback=None, disable=False, noise_mask=None, variant='bh1'): timesteps = sigmas.clone() if sigmas[-1] == 0: timesteps = sigmas[:] diff --git a/comfy/samplers.py b/comfy/samplers.py index a839ee9e2a2..b8836a29d26 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -511,11 +511,11 @@ def max_denoise(self, model_wrap, sigmas): class UNIPC(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) + return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, disable=disable_pbar) class UNIPCBH2(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, sampling_function=sampling_function, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) + return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", From 2c9dba8dc08eb35d29dab691c1f2808f6c9191ea Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 03:45:10 -0500 Subject: [PATCH 182/420] sampling_function now has the model object as the argument. 
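This is an interface change: sampling_function and calc_cond_uncond_batch now take the model object and call model.apply_model(...) themselves instead of being handed a bare function. Wrappers registered under model_options["model_function_wrapper"] keep the same shape, since they are still handed a callable plus the args dict. A hypothetical wrapper for illustration (model_options is assumed to be a dict in scope):

# `args` carries "input", "timestep", "c" and "cond_or_uncond"; the first
# parameter is now model.apply_model rather than a separately passed function.
def my_wrapper(apply_model, args):
    return apply_model(args["input"], args["timestep"], **args["c"])

model_options["model_function_wrapper"] = my_wrapper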
--- comfy/samplers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index b8836a29d26..a2c784a4a48 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -11,7 +11,7 @@ #The main sampling function shared by all the samplers #Returns denoised -def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): +def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) strength = 1.0 @@ -134,7 +134,7 @@ def cond_cat(c_list): return out - def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_total_area, model_options): + def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, max_total_area, model_options): out_cond = torch.zeros_like(x_in) out_count = torch.ones_like(x_in) * 1e-37 @@ -221,9 +221,9 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot c['transformer_options'] = transformer_options if 'model_function_wrapper' in model_options: - output = model_options['model_function_wrapper'](model_function, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks) + output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks) else: - output = model_function(input_x, timestep_, **c).chunk(batch_chunks) + output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) del input_x for o in range(batch_chunks): @@ -246,7 +246,7 @@ def calc_cond_uncond_batch(model_function, cond, uncond, x_in, timestep, max_tot if math.isclose(cond_scale, 1.0): uncond = None - cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, model_options) + cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, max_total_area, model_options) if "sampler_cfg_function" in model_options: args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} return x - model_options["sampler_cfg_function"](args) @@ -258,7 +258,7 @@ def __init__(self, model): super().__init__() self.inner_model = model def apply_model(self, x, timestep, cond, uncond, cond_scale, model_options={}, seed=None): - out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) + out = sampling_function(self.inner_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed) return out def forward(self, *args, **kwargs): return self.apply_model(*args, **kwargs) From dd4ba68b6e93a562d9499eff34e50dbbbc8714e7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 04:02:16 -0500 Subject: [PATCH 183/420] Allow different models to estimate memory usage differently. 
--- comfy/model_base.py | 10 ++++++++++ comfy/model_management.py | 21 --------------------- comfy/model_patcher.py | 3 +++ comfy/sample.py | 2 +- comfy/samplers.py | 9 +++++---- 5 files changed, 19 insertions(+), 26 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 7ba253470f4..f6de0b258d1 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -157,6 +157,16 @@ def state_dict_for_saving(self, clip_state_dict, vae_state_dict): def set_inpaint(self): self.inpaint_model = True + def memory_required(self, input_shape): + area = input_shape[0] * input_shape[2] * input_shape[3] + if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): + #TODO: this needs to be tweaked + return (area / 20) * (1024 * 1024) + else: + #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. + return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) + + def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0): adm_inputs = [] weights = [] diff --git a/comfy/model_management.py b/comfy/model_management.py index 53582fc736d..799e52ba239 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -579,27 +579,6 @@ def get_free_memory(dev=None, torch_free_too=False): else: return mem_free_total -def batch_area_memory(area): - if xformers_enabled() or pytorch_attention_flash_attention(): - #TODO: these formulas are copied from maximum_batch_area below - return (area / 20) * (1024 * 1024) - else: - return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) - -def maximum_batch_area(): - global vram_state - if vram_state == VRAMState.NO_VRAM: - return 0 - - memory_free = get_free_memory() / (1024 * 1024) - if xformers_enabled() or pytorch_attention_flash_attention(): - #TODO: this needs to be tweaked - area = 20 * memory_free - else: - #TODO: this formula is because AMD sucks and has memory management issues which might be fixed in the future - area = ((memory_free - 1024) * 0.9) / (0.6) - return int(max(area, 0)) - def cpu_mode(): global cpu_state return cpu_state == CPUState.CPU diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 9dc09791add..1c36855de90 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -52,6 +52,9 @@ def is_clone(self, other): return True return False + def memory_required(self, input_shape): + return self.model.memory_required(input_shape=input_shape) + def set_model_sampler_cfg_function(self, sampler_cfg_function): if len(inspect.signature(sampler_cfg_function).parameters) == 3: self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way diff --git a/comfy/sample.py b/comfy/sample.py index b3fcd1658a5..4bfdb8ce55d 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -83,7 +83,7 @@ def prepare_sampling(model, noise_shape, positive, negative, noise_mask): real_model = None models, inference_memory = get_additional_models(positive, negative, model.model_dtype()) - comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise_shape[0] * noise_shape[2] * noise_shape[3]) + inference_memory) + comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise_shape) + inference_memory) real_model = model.model return real_model, positive, negative, noise_mask, models diff --git a/comfy/samplers.py b/comfy/samplers.py index a2c784a4a48..5340dd019b4 100644 --- 
a/comfy/samplers.py +++ b/comfy/samplers.py @@ -134,7 +134,7 @@ def cond_cat(c_list): return out - def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, max_total_area, model_options): + def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): out_cond = torch.zeros_like(x_in) out_count = torch.ones_like(x_in) * 1e-37 @@ -170,9 +170,11 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, max_total_area, to_batch_temp.reverse() to_batch = to_batch_temp[:1] + free_memory = model_management.get_free_memory(x_in.device) for i in range(1, len(to_batch_temp) + 1): batch_amount = to_batch_temp[:len(to_batch_temp)//i] - if (len(batch_amount) * first_shape[0] * first_shape[2] * first_shape[3] < max_total_area): + input_shape = [len(batch_amount) * first_shape[0]] + list(first_shape)[1:] + if model.memory_required(input_shape) < free_memory: to_batch = batch_amount break @@ -242,11 +244,10 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, max_total_area, return out_cond, out_uncond - max_total_area = model_management.maximum_batch_area() if math.isclose(cond_scale, 1.0): uncond = None - cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, max_total_area, model_options) + cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options) if "sampler_cfg_function" in model_options: args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} return x - model_options["sampler_cfg_function"](args) From 4781819a85847a8cf180a41d0ee4cdf99979e5be Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 12 Nov 2023 04:26:16 -0500 Subject: [PATCH 184/420] Make memory estimation aware of model dtype. --- comfy/model_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index f6de0b258d1..37bf24bb8c6 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -161,7 +161,7 @@ def memory_required(self, input_shape): area = input_shape[0] * input_shape[2] * input_shape[3] if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): #TODO: this needs to be tweaked - return (area / 20) * (1024 * 1024) + return (area / (comfy.model_management.dtype_size(self.get_dtype()) * 10)) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) From 4aeef781a3caecc694e3336ca9339e8e171ba4d4 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 12 Nov 2023 19:49:23 +0000 Subject: [PATCH 185/420] Support number/text ids when importing API JSON (#1952) * support numeric/text ids --- web/scripts/app.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 61b88d44b85..d22b98c315f 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1863,7 +1863,7 @@ export class ComfyApp { for (const id of ids) { const data = apiData[id]; const node = LiteGraph.createNode(data.class_type); - node.id = id; + node.id = isNaN(+id) ? id : +id; graph.add(node); } From f12ec55983fb13b3bcad33b05ff8043b22b36181 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 00:42:34 -0500 Subject: [PATCH 186/420] Allow boolean widgets to have no options dict. 
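On the Python side this means a node can declare a BOOLEAN input without passing an options dict at all; the widget then falls back to default=False and the standard toggle labels. A hypothetical declaration:

class ExampleNode:  # illustrative custom node, not part of this change
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {"enabled": ("BOOLEAN",)}}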
--- web/scripts/widgets.js | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 2b674776937..36bc7ff7fd7 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -305,14 +305,23 @@ export const ComfyWidgets = { }; }, BOOLEAN(node, inputName, inputData) { - let defaultVal = inputData[1]["default"]; + let defaultVal = false; + let options = {}; + if (inputData[1]) { + if (inputData[1].default) + defaultVal = inputData[1].default; + if (inputData[1].label_on) + options["on"] = inputData[1].label_on; + if (inputData[1].label_off) + options["off"] = inputData[1].label_off; + } return { widget: node.addWidget( "toggle", inputName, defaultVal, () => {}, - {"on": inputData[1].label_on, "off": inputData[1].label_off} + options, ) }; }, From 7339479b10a622729222ae7d9a5e06db340a1b99 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 12:27:44 -0500 Subject: [PATCH 187/420] Disable xformers when it can't load properly. --- comfy/model_management.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 799e52ba239..be4301aa4e3 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -133,6 +133,10 @@ def get_total_memory(dev=None, torch_total_too=False): import xformers import xformers.ops XFORMERS_IS_AVAILABLE = True + try: + XFORMERS_IS_AVAILABLE = xformers._has_cpp_library + except: + pass try: XFORMERS_VERSION = xformers.version.__version__ print("xformers version:", XFORMERS_VERSION) From eb0407e80657ab603a1251a653ad8b2e9e89c83c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 16:26:28 -0500 Subject: [PATCH 188/420] Update litegraph to latest. --- web/lib/litegraph.core.js | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index e906590f5ef..0ca2038429e 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -2533,7 +2533,7 @@ var w = this.widgets[i]; if(!w) continue; - if(w.options && w.options.property && this.properties[ w.options.property ]) + if(w.options && w.options.property && (this.properties[ w.options.property ] != undefined)) w.value = JSON.parse( JSON.stringify( this.properties[ w.options.property ] ) ); } if (info.widgets_values) { @@ -4928,9 +4928,7 @@ LGraphNode.prototype.executeAction = function(action) this.title = o.title; this._bounding.set(o.bounding); this.color = o.color; - if (o.font_size) { - this.font_size = o.font_size; - } + this.font_size = o.font_size; }; LGraphGroup.prototype.serialize = function() { @@ -5714,10 +5712,10 @@ LGraphNode.prototype.executeAction = function(action) * @method enableWebGL **/ LGraphCanvas.prototype.enableWebGL = function() { - if (typeof GL === undefined) { + if (typeof GL === "undefined") { throw "litegl.js must be included to use a WebGL canvas"; } - if (typeof enableWebGLCanvas === undefined) { + if (typeof enableWebGLCanvas === "undefined") { throw "webglCanvas.js must be included to use this feature"; } @@ -7110,15 +7108,16 @@ LGraphNode.prototype.executeAction = function(action) } }; - LGraphCanvas.prototype.copyToClipboard = function() { + LGraphCanvas.prototype.copyToClipboard = function(nodes) { var clipboard_info = { nodes: [], links: [] }; var index = 0; var selected_nodes_array = []; - for (var i in this.selected_nodes) { - var node = this.selected_nodes[i]; + if (!nodes) nodes = this.selected_nodes; + for (var i in 
nodes) { + var node = nodes[i]; if (node.clonable === false) continue; node._relative_id = index; @@ -11702,7 +11701,7 @@ LGraphNode.prototype.executeAction = function(action) default: iS = 0; // try with first if no name set } - if (typeof options.node_from.outputs[iS] !== undefined){ + if (typeof options.node_from.outputs[iS] !== "undefined"){ if (iS!==false && iS>-1){ options.node_from.connectByType( iS, node, options.node_from.outputs[iS].type ); } @@ -11730,7 +11729,7 @@ LGraphNode.prototype.executeAction = function(action) default: iS = 0; // try with first if no name set } - if (typeof options.node_to.inputs[iS] !== undefined){ + if (typeof options.node_to.inputs[iS] !== "undefined"){ if (iS!==false && iS>-1){ // try connection options.node_to.connectByTypeOutput(iS,node,options.node_to.inputs[iS].type); From 61112c81b99d0e43c2d6031aae036eed8a39fdbb Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 21:45:08 -0500 Subject: [PATCH 189/420] Add a node to flip the sigmas for unsampling. --- comfy_extras/nodes_custom_sampler.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 154ecd0d234..ff7407f4192 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -118,6 +118,24 @@ def get_sigmas(self, sigmas, step): sigmas2 = sigmas[step:] return (sigmas1, sigmas2) +class FlipSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sigmas": ("SIGMAS", ), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "sampling/custom_sampling/sigmas" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, sigmas): + sigmas = sigmas.flip(0) + if sigmas[0] == 0: + sigmas[0] = 0.0001 + return (sigmas,) + class KSamplerSelect: @classmethod def INPUT_TYPES(s): @@ -243,4 +261,5 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, "SamplerDPMPP_SDE": SamplerDPMPP_SDE, "BasicScheduler": BasicScheduler, "SplitSigmas": SplitSigmas, + "FlipSigmas": FlipSigmas, } From 8509bd58b436eb56e1e251c627416b457626252a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 13 Nov 2023 21:45:23 -0500 Subject: [PATCH 190/420] Reorganize custom_sampling nodes. 
--- comfy_extras/nodes_custom_sampler.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index ff7407f4192..f0576946a58 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -16,7 +16,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -36,7 +36,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -54,7 +54,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -73,7 +73,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -92,7 +92,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/schedulers" FUNCTION = "get_sigmas" @@ -109,7 +109,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SIGMAS","SIGMAS") - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/sigmas" FUNCTION = "get_sigmas" @@ -144,7 +144,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/samplers" FUNCTION = "get_sampler" @@ -163,7 +163,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/samplers" FUNCTION = "get_sampler" @@ -187,7 +187,7 @@ def INPUT_TYPES(s): } } RETURN_TYPES = ("SAMPLER",) - CATEGORY = "sampling/custom_sampling" + CATEGORY = "sampling/custom_sampling/samplers" FUNCTION = "get_sampler" @@ -252,6 +252,7 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, NODE_CLASS_MAPPINGS = { "SamplerCustom": SamplerCustom, + "BasicScheduler": BasicScheduler, "KarrasScheduler": KarrasScheduler, "ExponentialScheduler": ExponentialScheduler, "PolyexponentialScheduler": PolyexponentialScheduler, @@ -259,7 +260,6 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, "KSamplerSelect": KSamplerSelect, "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, "SamplerDPMPP_SDE": SamplerDPMPP_SDE, - "BasicScheduler": BasicScheduler, "SplitSigmas": SplitSigmas, "FlipSigmas": FlipSigmas, } From 94cc718e9c42cb4de337293b66dd42fb594b9cae Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 00:08:12 -0500 Subject: [PATCH 191/420] Add a way to add patches to the input block. 
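This mirrors the existing output-block hook: a callback registered with set_model_input_block_patch(...) sees the hidden states after every input block and can pick a block via transformer_options["block"]. A hypothetical custom-node usage:

def input_block_patch(h, transformer_options):
    if transformer_options["block"] == ("input", 3):  # block 3, as an example
        h = h * 1.05  # illustrative tweak of the hidden states
    return h

m = model.clone()  # `model` being a ModelPatcher instance
m.set_model_input_block_patch(input_block_patch)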
--- comfy/ldm/modules/diffusionmodules/openaimodel.py | 5 +++++ comfy/model_patcher.py | 3 +++ 2 files changed, 8 insertions(+) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 49c1e8cbb5a..cac0dfb6598 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -624,6 +624,11 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo transformer_options["block"] = ("input", id) h = forward_timestep_embed(module, h, emb, context, transformer_options) h = apply_control(h, control, 'input') + if "input_block_patch" in transformer_patches: + patch = transformer_patches["input_block_patch"] + for p in patch: + h = p(h, transformer_options) + hs.append(h) transformer_options["block"] = ("middle", 0) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 1c36855de90..02368433126 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -96,6 +96,9 @@ def set_model_attn1_output_patch(self, patch): def set_model_attn2_output_patch(self, patch): self.set_model_patch(patch, "attn2_output_patch") + def set_model_input_block_patch(self, patch): + self.set_model_patch(patch, "input_block_patch") + def set_model_output_block_patch(self, patch): self.set_model_patch(patch, "output_block_patch") From f2e49b1d575b3da4367ba4d60b95187f270d42c9 Mon Sep 17 00:00:00 2001 From: Jianqi Pan Date: Tue, 14 Nov 2023 14:32:05 +0900 Subject: [PATCH 192/420] fix: adaptation to older versions of pytorch --- comfy/sd1_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 7db7ee0f449..af621b2dcb5 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -175,7 +175,7 @@ def forward(self, tokens): else: precision_scope = lambda a, b: contextlib.nullcontext(a) - with precision_scope(model_management.get_autocast_device(device), torch.float32): + with precision_scope(model_management.get_autocast_device(device), dtype=torch.float32): attention_mask = None if self.enable_attention_masks: attention_mask = torch.zeros_like(tokens) From 420beeeb05ef59e887f8731f615f8a9ec6eb0a4c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 00:39:34 -0500 Subject: [PATCH 193/420] Clean up and refactor sampler code. This should make it much easier to write custom nodes with kdiffusion type samplers. --- comfy/samplers.py | 85 +++++++++++++++++----------- comfy_extras/nodes_custom_sampler.py | 6 +- 2 files changed, 54 insertions(+), 37 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 5340dd019b4..65c44791d02 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -522,42 +522,59 @@ def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=N "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"] -def ksampler(sampler_name, extra_options={}, inpaint_options={}): - class KSAMPLER(Sampler): - def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): - extra_args["denoise_mask"] = denoise_mask - model_k = KSamplerX0Inpaint(model_wrap) - model_k.latent_image = latent_image - if inpaint_options.get("random", False): #TODO: Should this be the default?
- generator = torch.manual_seed(extra_args.get("seed", 41) + 1) - model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device) - else: - model_k.noise = noise +class KSAMPLER(Sampler): + def __init__(self, sampler_function, extra_options={}, inpaint_options={}): + self.sampler_function = sampler_function + self.extra_options = extra_options + self.inpaint_options = inpaint_options - if self.max_denoise(model_wrap, sigmas): - noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) - else: - noise = noise * sigmas[0] + def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): + extra_args["denoise_mask"] = denoise_mask + model_k = KSamplerX0Inpaint(model_wrap) + model_k.latent_image = latent_image + if self.inpaint_options.get("random", False): #TODO: Should this be the default? + generator = torch.manual_seed(extra_args.get("seed", 41) + 1) + model_k.noise = torch.randn(noise.shape, generator=generator, device="cpu").to(noise.dtype).to(noise.device) + else: + model_k.noise = noise - k_callback = None - total_steps = len(sigmas) - 1 - if callback is not None: - k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps) + if self.max_denoise(model_wrap, sigmas): + noise = noise * torch.sqrt(1.0 + sigmas[0] ** 2.0) + else: + noise = noise * sigmas[0] + + k_callback = None + total_steps = len(sigmas) - 1 + if callback is not None: + k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps) + + if latent_image is not None: + noise += latent_image + samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options) + return samples + + +def ksampler(sampler_name, extra_options={}, inpaint_options={}): + if sampler_name == "dpm_fast": + def dpm_fast_function(model, noise, sigmas, extra_args, callback, disable): sigma_min = sigmas[-1] if sigma_min == 0: sigma_min = sigmas[-2] + total_steps = len(sigmas) - 1 + return k_diffusion_sampling.sample_dpm_fast(model, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=callback, disable=disable) + sampler_function = dpm_fast_function + elif sampler_name == "dpm_adaptive": + def dpm_adaptive_function(model, noise, sigmas, extra_args, callback, disable): + sigma_min = sigmas[-1] + if sigma_min == 0: + sigma_min = sigmas[-2] + return k_diffusion_sampling.sample_dpm_adaptive(model, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=callback, disable=disable) + sampler_function = dpm_adaptive_function + else: + sampler_function = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name)) - if latent_image is not None: - noise += latent_image - if sampler_name == "dpm_fast": - samples = k_diffusion_sampling.sample_dpm_fast(model_k, noise, sigma_min, sigmas[0], total_steps, extra_args=extra_args, callback=k_callback, disable=disable_pbar) - elif sampler_name == "dpm_adaptive": - samples = k_diffusion_sampling.sample_dpm_adaptive(model_k, noise, sigma_min, sigmas[0], extra_args=extra_args, callback=k_callback, disable=disable_pbar) - else: - samples = getattr(k_diffusion_sampling, "sample_{}".format(sampler_name))(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **extra_options) - return samples - return KSAMPLER + return KSAMPLER(sampler_function, extra_options, inpaint_options) def wrap_model(model): model_denoise = CFGNoisePredictor(model) @@ -618,11 +635,11 @@ def 
calculate_sigmas_scheduler(model, scheduler_name, steps): print("error invalid scheduler", self.scheduler) return sigmas -def sampler_class(name): +def sampler_object(name): if name == "uni_pc": - sampler = UNIPC + sampler = UNIPC() elif name == "uni_pc_bh2": - sampler = UNIPCBH2 + sampler = UNIPCBH2() elif name == "ddim": sampler = ksampler("euler", inpaint_options={"random": True}) else: @@ -687,6 +704,6 @@ def sample(self, noise, positive, negative, cfg, latent_image=None, start_step=N else: return torch.zeros_like(noise) - sampler = sampler_class(self.sampler) + sampler = sampler_object(self.sampler) - return sample(self.model, noise, positive, negative, cfg, self.device, sampler(), sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) + return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index f0576946a58..d3c1d4a23ee 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -149,7 +149,7 @@ def INPUT_TYPES(s): FUNCTION = "get_sampler" def get_sampler(self, sampler_name): - sampler = comfy.samplers.sampler_class(sampler_name)() + sampler = comfy.samplers.sampler_object(sampler_name) return (sampler, ) class SamplerDPMPP_2M_SDE: @@ -172,7 +172,7 @@ def get_sampler(self, solver_type, eta, s_noise, noise_device): sampler_name = "dpmpp_2m_sde" else: sampler_name = "dpmpp_2m_sde_gpu" - sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type})() + sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "solver_type": solver_type}) return (sampler, ) @@ -196,7 +196,7 @@ def get_sampler(self, eta, s_noise, r, noise_device): sampler_name = "dpmpp_sde" else: sampler_name = "dpmpp_sde_gpu" - sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r})() + sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise, "r": r}) return (sampler, ) class SamplerCustom: From c962884a5c987e95d6928565ddb44220b769808e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 11:38:36 -0500 Subject: [PATCH 194/420] Make bislerp work on GPU. 
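The index and ratio tensors are now created directly on the input tensor's device, so "bislerp" upscaling no longer forces a CPU round-trip. Hypothetical usage through the existing helper (assumes a CUDA device is available):

import torch
import comfy.utils

latent = torch.randn(1, 4, 64, 64, device="cuda")
upscaled = comfy.utils.common_upscale(latent, 128, 128, "bislerp", "disabled")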
--- comfy/utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/comfy/utils.py b/comfy/utils.py index 4b484d07ac9..1985012e0f1 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -307,13 +307,13 @@ def slerp(b1, b2, r): res[dot < 1e-5 - 1] = (b1 * (1.0-r) + b2 * r)[dot < 1e-5 - 1] return res - def generate_bilinear_data(length_old, length_new): - coords_1 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32) + def generate_bilinear_data(length_old, length_new, device): + coords_1 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1)) coords_1 = torch.nn.functional.interpolate(coords_1, size=(1, length_new), mode="bilinear") ratios = coords_1 - coords_1.floor() coords_1 = coords_1.to(torch.int64) - coords_2 = torch.arange(length_old).reshape((1,1,1,-1)).to(torch.float32) + 1 + coords_2 = torch.arange(length_old, dtype=torch.float32, device=device).reshape((1,1,1,-1)) + 1 coords_2[:,:,:,-1] -= 1 coords_2 = torch.nn.functional.interpolate(coords_2, size=(1, length_new), mode="bilinear") coords_2 = coords_2.to(torch.int64) @@ -323,7 +323,7 @@ def generate_bilinear_data(length_old, length_new): h_new, w_new = (height, width) #linear w - ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new) + ratios, coords_1, coords_2 = generate_bilinear_data(w, w_new, samples.device) coords_1 = coords_1.expand((n, c, h, -1)) coords_2 = coords_2.expand((n, c, h, -1)) ratios = ratios.expand((n, 1, h, -1)) @@ -336,7 +336,7 @@ def generate_bilinear_data(length_old, length_new): result = result.reshape(n, h, w_new, c).movedim(-1, 1) #linear h - ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new) + ratios, coords_1, coords_2 = generate_bilinear_data(h, h_new, samples.device) coords_1 = coords_1.reshape((1,1,-1,1)).expand((n, c, -1, w_new)) coords_2 = coords_2.reshape((1,1,-1,1)).expand((n, c, -1, w_new)) ratios = ratios.reshape((1,1,-1,1)).expand((n, 1, -1, w_new)) From 728613bb3e9a42a3e05abf19b1b893eb6ef35081 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 14:41:31 -0500 Subject: [PATCH 195/420] Fix last PR. --- comfy/sd1_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index af621b2dcb5..58acb97fce7 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -173,7 +173,7 @@ def forward(self, tokens): if getattr(self.transformer, self.inner_name).final_layer_norm.weight.dtype != torch.float32: precision_scope = torch.autocast else: - precision_scope = lambda a, b: contextlib.nullcontext(a) + precision_scope = lambda a, dtype: contextlib.nullcontext(a) with precision_scope(model_management.get_autocast_device(device), dtype=torch.float32): attention_mask = None if self.enable_attention_masks: attention_mask = torch.zeros_like(tokens) From 7b87c825a3e95b362b101a608bbae2bbf13e1850 Mon Sep 17 00:00:00 2001 From: 42lux Date: Wed, 15 Nov 2023 02:37:35 +0100 Subject: [PATCH 196/420] Added color schemes: Arc, Nord and Github.
--- web/extensions/core/colorPalette.js | 207 ++++++++++++++++++++++++++++ 1 file changed, 207 insertions(+) diff --git a/web/extensions/core/colorPalette.js b/web/extensions/core/colorPalette.js index 3695b08e27f..b8d83613d4b 100644 --- a/web/extensions/core/colorPalette.js +++ b/web/extensions/core/colorPalette.js @@ -174,6 +174,213 @@ const colorPalettes = { "tr-odd-bg-color": "#073642", } }, + }, + "arc": { + "id": "arc", + "name": "Arc", + "colors": { + "node_slot": { + "BOOLEAN": "", + "CLIP": "#eacb8b", + "CLIP_VISION": "#A8DADC", + "CLIP_VISION_OUTPUT": "#ad7452", + "CONDITIONING": "#cf876f", + "CONTROL_NET": "#00d78d", + "CONTROL_NET_WEIGHTS": "", + "FLOAT": "", + "GLIGEN": "", + "IMAGE": "#80a1c0", + "IMAGEUPLOAD": "", + "INT": "", + "LATENT": "#b38ead", + "LATENT_KEYFRAME": "", + "MASK": "#a3bd8d", + "MODEL": "#8978a7", + "SAMPLER": "", + "SIGMAS": "", + "STRING": "", + "STYLE_MODEL": "#C2FFAE", + "T2I_ADAPTER_WEIGHTS": "", + "TAESD": "#DCC274", + "TIMESTEP_KEYFRAME": "", + "UPSCALE_MODEL": "", + "VAE": "#be616b" + }, + "litegraph_base": { + "BACKGROUND_IMAGE": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAACXBIWXMAAAsTAAALEwEAmpwYAAABcklEQVR4nO3YMUoDARgF4RfxBqZI6/0vZqFn0MYtrLIQMFN8U6V4LAtD+Jm9XG/v30OGl2e/AP7yevz4+vx45nvgF/+QGITEICQGITEIiUFIjNNC3q43u3/YnRJyPOzeQ+0e220nhRzReC8e7R7bbdvl+Jal1Bs46jEIiUFIDEJiEBKDkBhKPbZT6qHdptRTu02p53DUYxASg5AYhMQgJAYhMZR6bKfUQ7tNqad2m1LP4ajHICQGITEIiUFIDEJiKPXYTqmHdptST+02pZ7DUY9BSAxCYhASg5AYhMRQ6rGdUg/tNqWe2m1KPYejHoOQGITEICQGITEIiaHUYzulHtptSj2125R6Dkc9BiExCIlBSAxCYhASQ6nHdko9tNuUemq3KfUcjnoMQmIQEoOQGITEICSGUo/tlHpotyn11G5T6jkc9RiExCAkBiExCIlBSAylHtsp9dBuU+qp3abUczjqMQiJQUgMQmIQEoOQGITE+AHFISNQrFTGuwAAAABJRU5ErkJggg==", + "CLEAR_BACKGROUND_COLOR": "#2b2f38", + "NODE_TITLE_COLOR": "#b2b7bd", + "NODE_SELECTED_TITLE_COLOR": "#FFF", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#AAA", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#2b2f38", + "NODE_DEFAULT_BGCOLOR": "#242730", + "NODE_DEFAULT_BOXCOLOR": "#6e7581", + "NODE_DEFAULT_SHAPE": "box", + "NODE_BOX_OUTLINE_COLOR": "#FFF", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 22, + "WIDGET_BGCOLOR": "#2b2f38", + "WIDGET_OUTLINE_COLOR": "#6e7581", + "WIDGET_TEXT_COLOR": "#DDD", + "WIDGET_SECONDARY_TEXT_COLOR": "#b2b7bd", + "LINK_COLOR": "#9A9", + "EVENT_LINK_COLOR": "#A86", + "CONNECTING_LINK_COLOR": "#AFA" + }, + "comfy_base": { + "fg-color": "#fff", + "bg-color": "#2b2f38", + "comfy-menu-bg": "#242730", + "comfy-input-bg": "#2b2f38", + "input-text": "#ddd", + "descrip-text": "#b2b7bd", + "drag-text": "#ccc", + "error-text": "#ff4444", + "border-color": "#6e7581", + "tr-even-bg-color": "#2b2f38", + "tr-odd-bg-color": "#242730" + } + }, + }, + "nord": { + "id": "nord", + "name": "Nord", + "colors": { + "node_slot": { + "BOOLEAN": "", + "CLIP": "#eacb8b", + "CLIP_VISION": "#A8DADC", + "CLIP_VISION_OUTPUT": "#ad7452", + "CONDITIONING": "#cf876f", + "CONTROL_NET": "#00d78d", + "CONTROL_NET_WEIGHTS": "", + "FLOAT": "", + "GLIGEN": "", + "IMAGE": "#80a1c0", + "IMAGEUPLOAD": "", + "INT": "", + "LATENT": "#b38ead", + "LATENT_KEYFRAME": "", + "MASK": "#a3bd8d", + "MODEL": "#8978a7", + "SAMPLER": "", + "SIGMAS": "", + "STRING": "", + "STYLE_MODEL": "#C2FFAE", + "T2I_ADAPTER_WEIGHTS": "", + "TAESD": "#DCC274", + "TIMESTEP_KEYFRAME": "", + "UPSCALE_MODEL": "", + "VAE": "#be616b" + }, + "litegraph_base": { + "BACKGROUND_IMAGE": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAFu2lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOjUwNDFhMmZjLTEzNzQtMTk0ZC1hZWY4LTYxMzM1MTVmNjUwMCIgeG1wTU06RG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDoyMzFiMTBiMC1iNGZiLTAyNGUtYjEyZS0zMDUzMDNjZDA3YzgiPiA8eG1wTU06SGlzdG9yeT4gPHJkZjpTZXE+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJjcmVhdGVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo1MDQxYTJmYy0xMzc0LTE5NGQtYWVmOC02MTMzNTE1ZjY1MDAiIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDE6MjA6NDUrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz73jWg/AAAAyUlEQVR42u3WKwoAIBRFQRdiMb1idv9Lsxn9gEFw4Dbb8JCTojbbXEJwjJVL2HKwYMGCBQuWLbDmjr+9zrBGjHl1WVcvy2DBggULFizTWQpewSt4HzwsgwULFiwFr7MUvMtS8D54WLBgGSxYCl7BK3iXZbBgwYIFC5bpLAWv4BW8Dx6WwYIFC5aC11kK3mUpeB88LFiwDBYsBa/gFbzLMliwYMGCBct0loJX8AreBw/LYMGCBUvB6ywF77IUvA8eFixYBgsWrNfWAZPltufdad+1AAAAAElFTkSuQmCC", + "CLEAR_BACKGROUND_COLOR": "#212732", + "NODE_TITLE_COLOR": "#999", + "NODE_SELECTED_TITLE_COLOR": "#e5eaf0", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#bcc2c8", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#2e3440", + "NODE_DEFAULT_BGCOLOR": "#161b22", + "NODE_DEFAULT_BOXCOLOR": "#545d70", + "NODE_DEFAULT_SHAPE": "box", + "NODE_BOX_OUTLINE_COLOR": "#e5eaf0", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 24, + "WIDGET_BGCOLOR": "#2e3440", + "WIDGET_OUTLINE_COLOR": "#545d70", + "WIDGET_TEXT_COLOR": "#bcc2c8", + "WIDGET_SECONDARY_TEXT_COLOR": "#999", + "LINK_COLOR": "#9A9", + "EVENT_LINK_COLOR": "#A86", + "CONNECTING_LINK_COLOR": "#AFA" + }, + "comfy_base": { + "fg-color": "#e5eaf0", + "bg-color": "#2e3440", + "comfy-menu-bg": "#161b22", + "comfy-input-bg": "#2e3440", + "input-text": "#bcc2c8", + "descrip-text": "#999", + "drag-text": "#ccc", + "error-text": "#ff4444", + "border-color": "#545d70", + "tr-even-bg-color": "#2e3440", + "tr-odd-bg-color": "#161b22" + } + }, + }, + "github": { + "id": "github", + "name": "Github", + "colors": { + "node_slot": { + "BOOLEAN": "", + "CLIP": "#eacb8b", + "CLIP_VISION": "#A8DADC", + 
"CLIP_VISION_OUTPUT": "#ad7452", + "CONDITIONING": "#cf876f", + "CONTROL_NET": "#00d78d", + "CONTROL_NET_WEIGHTS": "", + "FLOAT": "", + "GLIGEN": "", + "IMAGE": "#80a1c0", + "IMAGEUPLOAD": "", + "INT": "", + "LATENT": "#b38ead", + "LATENT_KEYFRAME": "", + "MASK": "#a3bd8d", + "MODEL": "#8978a7", + "SAMPLER": "", + "SIGMAS": "", + "STRING": "", + "STYLE_MODEL": "#C2FFAE", + "T2I_ADAPTER_WEIGHTS": "", + "TAESD": "#DCC274", + "TIMESTEP_KEYFRAME": "", + "UPSCALE_MODEL": "", + "VAE": "#be616b" + }, + "litegraph_base": { + "BACKGROUND_IMAGE": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAACXBIWXMAAAsTAAALEwEAmpwYAAAGlmlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgOS4xLWMwMDEgNzkuMTQ2Mjg5OSwgMjAyMy8wNi8yNS0yMDowMTo1NSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1sbnM6eG1wTU09Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9tbS8iIHhtbG5zOnN0RXZ0PSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvc1R5cGUvUmVzb3VyY2VFdmVudCMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiB4bXA6Q3JlYXRlRGF0ZT0iMjAyMy0xMS0xM1QwMDoxODowMiswMTowMCIgeG1wOk1vZGlmeURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHhtcDpNZXRhZGF0YURhdGU9IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIiB4bXBNTTpJbnN0YW5jZUlEPSJ4bXAuaWlkOmIyYzRhNjA5LWJmYTctYTg0MC1iOGFlLTk3MzE2ZjM1ZGIyNyIgeG1wTU06RG9jdW1lbnRJRD0iYWRvYmU6ZG9jaWQ6cGhvdG9zaG9wOjk0ZmNlZGU4LTE1MTctZmQ0MC04ZGU3LWYzOTgxM2E3ODk5ZiIgeG1wTU06T3JpZ2luYWxEb2N1bWVudElEPSJ4bXAuZGlkOjIzMWIxMGIwLWI0ZmItMDI0ZS1iMTJlLTMwNTMwM2NkMDdjOCI+IDx4bXBNTTpIaXN0b3J5PiA8cmRmOlNlcT4gPHJkZjpsaSBzdEV2dDphY3Rpb249ImNyZWF0ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6MjMxYjEwYjAtYjRmYi0wMjRlLWIxMmUtMzA1MzAzY2QwN2M4IiBzdEV2dDp3aGVuPSIyMDIzLTExLTEzVDAwOjE4OjAyKzAxOjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgMjUuMSAoV2luZG93cykiLz4gPHJkZjpsaSBzdEV2dDphY3Rpb249InNhdmVkIiBzdEV2dDppbnN0YW5jZUlEPSJ4bXAuaWlkOjQ4OWY1NzlmLTJkNjUtZWQ0Zi04OTg0LTA4NGE2MGE1ZTMzNSIgc3RFdnQ6d2hlbj0iMjAyMy0xMS0xNVQwMjowNDo1OSswMTowMCIgc3RFdnQ6c29mdHdhcmVBZ2VudD0iQWRvYmUgUGhvdG9zaG9wIDI1LjEgKFdpbmRvd3MpIiBzdEV2dDpjaGFuZ2VkPSIvIi8+IDxyZGY6bGkgc3RFdnQ6YWN0aW9uPSJzYXZlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDpiMmM0YTYwOS1iZmE3LWE4NDAtYjhhZS05NzMxNmYzNWRiMjciIHN0RXZ0OndoZW49IjIwMjMtMTEtMTVUMDI6MDQ6NTkrMDE6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyNS4xIChXaW5kb3dzKSIgc3RFdnQ6Y2hhbmdlZD0iLyIvPiA8L3JkZjpTZXE+IDwveG1wTU06SGlzdG9yeT4gPC9yZGY6RGVzY3JpcHRpb24+IDwvcmRmOlJERj4gPC94OnhtcG1ldGE+IDw/eHBhY2tldCBlbmQ9InIiPz4OTe6GAAAAx0lEQVR42u3WMQoAIQxFwRzJys77X8vSLiRgITif7bYbgrwYc/mKXyBoY4VVBgsWLFiwYFmOlTv+9jfDOjHmr8u6eVkGCxYsWLBgmc5S8ApewXvgYRksWLBgKXidpeBdloL3wMOCBctgwVLwCl7BuyyDBQsWLFiwTGcpeAWv4D3wsAwWLFiwFLzOUvAuS8F74GHBgmWwYCl4Ba/gXZbBggULFixYprMUvIJX8B54WAYLFixYCl5nKXiXpeA98LBgwTJYsGC9tg1o8f4TTtqzNQAAAABJRU5ErkJggg==", + "CLEAR_BACKGROUND_COLOR": "#040506", + "NODE_TITLE_COLOR": "#999", + "NODE_SELECTED_TITLE_COLOR": "#e5eaf0", + "NODE_TEXT_SIZE": 14, + "NODE_TEXT_COLOR": "#bcc2c8", + "NODE_SUBTEXT_SIZE": 12, + "NODE_DEFAULT_COLOR": "#161b22", + "NODE_DEFAULT_BGCOLOR": "#13171d", + "NODE_DEFAULT_BOXCOLOR": "#30363d", + "NODE_DEFAULT_SHAPE": "box", + 
"NODE_BOX_OUTLINE_COLOR": "#e5eaf0", + "DEFAULT_SHADOW_COLOR": "rgba(0,0,0,0.5)", + "DEFAULT_GROUP_FONT": 24, + "WIDGET_BGCOLOR": "#161b22", + "WIDGET_OUTLINE_COLOR": "#30363d", + "WIDGET_TEXT_COLOR": "#bcc2c8", + "WIDGET_SECONDARY_TEXT_COLOR": "#999", + "LINK_COLOR": "#9A9", + "EVENT_LINK_COLOR": "#A86", + "CONNECTING_LINK_COLOR": "#AFA" + }, + "comfy_base": { + "fg-color": "#e5eaf0", + "bg-color": "#161b22", + "comfy-menu-bg": "#13171d", + "comfy-input-bg": "#161b22", + "input-text": "#bcc2c8", + "descrip-text": "#999", + "drag-text": "#ccc", + "error-text": "#ff4444", + "border-color": "#30363d", + "tr-even-bg-color": "#161b22", + "tr-odd-bg-color": "#13171d" + } + }, } }; From 57eea0efbb07a48d4810b477b29d44ba5425a742 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 14 Nov 2023 23:45:36 -0500 Subject: [PATCH 197/420] heunpp2 sampler. --- comfy/k_diffusion/sampling.py | 58 +++++++++++++++++++++++++++++++++++ comfy/samplers.py | 2 +- 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/comfy/k_diffusion/sampling.py b/comfy/k_diffusion/sampling.py index dd6f7bbe598..761c2e0ef7c 100644 --- a/comfy/k_diffusion/sampling.py +++ b/comfy/k_diffusion/sampling.py @@ -750,3 +750,61 @@ def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, n if sigmas[i + 1] > 0: x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1]) return x + + + +@torch.no_grad() +def sample_heunpp2(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1.): + # From MIT licensed: https://github.com/Carzit/sd-webui-samplers-scheduler/ + extra_args = {} if extra_args is None else extra_args + s_in = x.new_ones([x.shape[0]]) + s_end = sigmas[-1] + for i in trange(len(sigmas) - 1, disable=disable): + gamma = min(s_churn / (len(sigmas) - 1), 2 ** 0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0. 
+ eps = torch.randn_like(x) * s_noise + sigma_hat = sigmas[i] * (gamma + 1) + if gamma > 0: + x = x + eps * (sigma_hat ** 2 - sigmas[i] ** 2) ** 0.5 + denoised = model(x, sigma_hat * s_in, **extra_args) + d = to_d(x, sigma_hat, denoised) + if callback is not None: + callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised}) + dt = sigmas[i + 1] - sigma_hat + if sigmas[i + 1] == s_end: + # Euler method + x = x + d * dt + elif sigmas[i + 2] == s_end: + + # Heun's method + x_2 = x + d * dt + denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args) + d_2 = to_d(x_2, sigmas[i + 1], denoised_2) + + w = 2 * sigmas[0] + w2 = sigmas[i+1]/w + w1 = 1 - w2 + + d_prime = d * w1 + d_2 * w2 + + + x = x + d_prime * dt + + else: + # Heun++ + x_2 = x + d * dt + denoised_2 = model(x_2, sigmas[i + 1] * s_in, **extra_args) + d_2 = to_d(x_2, sigmas[i + 1], denoised_2) + dt_2 = sigmas[i + 2] - sigmas[i + 1] + + x_3 = x_2 + d_2 * dt_2 + denoised_3 = model(x_3, sigmas[i + 2] * s_in, **extra_args) + d_3 = to_d(x_3, sigmas[i + 2], denoised_3) + + w = 3 * sigmas[0] + w2 = sigmas[i + 1] / w + w3 = sigmas[i + 2] / w + w1 = 1 - w2 - w3 + + d_prime = w1 * d + w2 * d_2 + w3 * d_3 + x = x + d_prime * dt + return x diff --git a/comfy/samplers.py b/comfy/samplers.py index 65c44791d02..d8037d8ea8b 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -518,7 +518,7 @@ class UNIPCBH2(Sampler): def sample(self, model_wrap, sigmas, extra_args, callback, noise, latent_image=None, denoise_mask=None, disable_pbar=False): return uni_pc.sample_unipc(model_wrap, noise, latent_image, sigmas, max_denoise=self.max_denoise(model_wrap, sigmas), extra_args=extra_args, noise_mask=denoise_mask, callback=callback, variant='bh2', disable=disable_pbar) -KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral", +KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral", "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu", "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"] From 7114cfec0eefe713340257c85a2b342e98fdcfb2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 15 Nov 2023 15:55:02 -0500 Subject: [PATCH 198/420] Always clone graph data when loading to fix some load issues. --- web/scripts/app.js | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index d22b98c315f..4507527f686 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1489,16 +1489,18 @@ export class ComfyApp { let reset_invalid_values = false; if (!graphData) { - if (typeof structuredClone === "undefined") - { - graphData = JSON.parse(JSON.stringify(defaultGraph)); - }else - { - graphData = structuredClone(defaultGraph); - } + graphData = defaultGraph; reset_invalid_values = true; } + if (typeof structuredClone === "undefined") + { + graphData = JSON.parse(JSON.stringify(graphData)); + }else + { + graphData = structuredClone(graphData); + } + const missingNodeTypes = []; for (let n of graphData.nodes) { // Patch T2IAdapterLoader to ControlNetLoader since they are the same node now From dcec1047e6bb04880551a64cdb8f31dbde920ea0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 04:07:35 -0500 Subject: [PATCH 199/420] Invert the start and end percentages in the code. This doesn't affect how percentages behave in the frontend but breaks things if you relied on them in the backend. 
percent_to_sigma goes from 0 to 1.0 instead of 1.0 to 0 for less confusion. Make percent 0 return an extremely large sigma and percent 1.0 return a zero one to fix imprecision. --- comfy/controlnet.py | 4 ++-- comfy/model_sampling.py | 5 +++++ comfy/samplers.py | 2 ++ comfy_extras/nodes_model_advanced.py | 5 +++++ nodes.py | 6 +++--- 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 09868158287..433381df6ec 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -33,7 +33,7 @@ def __init__(self, device=None): self.cond_hint_original = None self.cond_hint = None self.strength = 1.0 - self.timestep_percent_range = (1.0, 0.0) + self.timestep_percent_range = (0.0, 1.0) self.timestep_range = None if device is None: @@ -42,7 +42,7 @@ def __init__(self, device=None): self.previous_controlnet = None self.global_average_pooling = False - def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)): + def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(0.0, 1.0)): self.cond_hint_original = cond_hint self.strength = strength self.timestep_percent_range = timestep_percent_range diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index a2935d47d18..d5b1642ef3a 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -76,5 +76,10 @@ def sigma(self, timestep): return log_sigma.exp() def percent_to_sigma(self, percent): + if percent <= 0.0: + return torch.tensor(999999999.9) + if percent >= 1.0: + return torch.tensor(0.0) + percent = 1.0 - percent return self.sigma(torch.tensor(percent * 999.0)) diff --git a/comfy/samplers.py b/comfy/samplers.py index d8037d8ea8b..1d012a514a7 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -220,6 +220,8 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): transformer_options["patches"] = patches transformer_options["cond_or_uncond"] = cond_or_uncond[:] + transformer_options["sigmas"] = timestep + c['transformer_options'] = transformer_options if 'model_function_wrapper' in model_options: diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 399123eaa2e..c8c4b4a1e70 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -66,6 +66,11 @@ def sigma(self, timestep): return log_sigma.exp() def percent_to_sigma(self, percent): + if percent <= 0.0: + return torch.tensor(999999999.9) + if percent >= 1.0: + return torch.tensor(0.0) + percent = 1.0 - percent return self.sigma(torch.tensor(percent * 999.0)) diff --git a/nodes.py b/nodes.py index 2bbfd8fe874..e8cfb5e6ac2 100644 --- a/nodes.py +++ b/nodes.py @@ -248,8 +248,8 @@ def set_range(self, conditioning, start, end): c = [] for t in conditioning: d = t[1].copy() - d['start_percent'] = 1.0 - start - d['end_percent'] = 1.0 - end + d['start_percent'] = start + d['end_percent'] = end n = [t[0], d] c.append(n) return (c, ) @@ -685,7 +685,7 @@ def apply_controlnet(self, positive, negative, control_net, image, strength, sta if prev_cnet in cnets: c_net = cnets[prev_cnet] else: - c_net = control_net.copy().set_cond_hint(control_hint, strength, (1.0 - start_percent, 1.0 - end_percent)) + c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent)) c_net.set_previous_controlnet(prev_cnet) cnets[prev_cnet] = c_net From 7ea6bb038cf488224269565bf0e0bcc400f0a7e2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 12:57:12 -0500 Subject: 
[PATCH 200/420] Print warning when controlnet can't be applied instead of crashing. --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index cac0dfb6598..504b79ede66 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -255,7 +255,10 @@ def apply_control(h, control, name): if control is not None and name in control and len(control[name]) > 0: ctrl = control[name].pop() if ctrl is not None: - h += ctrl + try: + h += ctrl + except: + print("warning control could not be applied", h.shape, ctrl.shape) return h class UNetModel(nn.Module): From bd07ad1861949007139de7dd5c6bcdb77426919c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 13:23:25 -0500 Subject: [PATCH 201/420] Add PatchModelAddDownscale (Kohya Deep Shrink) node. By adding a downscale to the unet in the first timesteps this node lets you generate images at higher resolutions with less consistency issues. --- comfy_extras/nodes_model_downscale.py | 45 +++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 46 insertions(+) create mode 100644 comfy_extras/nodes_model_downscale.py diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py new file mode 100644 index 00000000000..f1b2d3ff2c5 --- /dev/null +++ b/comfy_extras/nodes_model_downscale.py @@ -0,0 +1,45 @@ +import torch + +class PatchModelAddDownscale: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "block_number": ("INT", {"default": 3, "min": 1, "max": 32, "step": 1}), + "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}), + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, block_number, downscale_factor, start_percent, end_percent): + sigma_start = model.model.model_sampling.percent_to_sigma(start_percent).item() + sigma_end = model.model.model_sampling.percent_to_sigma(end_percent).item() + + def input_block_patch(h, transformer_options): + if transformer_options["block"][1] == block_number: + sigma = transformer_options["sigmas"][0].item() + if sigma <= sigma_start and sigma >= sigma_end: + h = torch.nn.functional.interpolate(h, scale_factor=(1.0 / downscale_factor), mode="bicubic", align_corners=False) + return h + + def output_block_patch(h, hsp, transformer_options): + if h.shape[2] != hsp.shape[2]: + h = torch.nn.functional.interpolate(h, size=(hsp.shape[2], hsp.shape[3]), mode="bicubic", align_corners=False) + return h, hsp + + m = model.clone() + m.set_model_input_block_patch(input_block_patch) + m.set_model_output_block_patch(output_block_patch) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "PatchModelAddDownscale": PatchModelAddDownscale, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + # Sampling + "PatchModelAddDownscale": "PatchModelAddDownscale (Kohya Deep Shrink)", +} diff --git a/nodes.py b/nodes.py index e8cfb5e6ac2..f9d2d7f6c8b 100644 --- a/nodes.py +++ b/nodes.py @@ -1799,6 +1799,7 @@ def init_custom_nodes(): "nodes_custom_sampler.py", "nodes_hypertile.py", "nodes_model_advanced.py", + "nodes_model_downscale.py", ] for node_file in extras_files: From 
9f00a18095e5f8ef114525bc19db035756501959 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 14:59:54 -0500 Subject: [PATCH 202/420] Fix potential issues. --- comfy/model_patcher.py | 2 +- comfy/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 02368433126..7f5ed45fee2 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -37,7 +37,7 @@ def model_size(self): return size def clone(self): - n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device) + n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device, weight_inplace_update=self.weight_inplace_update) n.patches = {} for k in self.patches: n.patches[k] = self.patches[k][:] diff --git a/comfy/utils.py b/comfy/utils.py index 1985012e0f1..f4c0ab41928 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -258,7 +258,7 @@ def set_attr(obj, attr, value): for name in attrs[:-1]: obj = getattr(obj, name) prev = getattr(obj, attrs[-1]) - setattr(obj, attrs[-1], torch.nn.Parameter(value)) + setattr(obj, attrs[-1], torch.nn.Parameter(value, requires_grad=False)) del prev def copy_to_param(obj, attr, value): From 7e3fe3ad28fad4dede2893d77093a086344b81b6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 15:26:28 -0500 Subject: [PATCH 203/420] Make deep shrink behave like it should. --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 4 ++++ comfy/model_patcher.py | 3 +++ comfy_extras/nodes_model_downscale.py | 8 ++++++-- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 504b79ede66..10eb68d73b5 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -633,6 +633,10 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo h = p(h, transformer_options) hs.append(h) + if "input_block_patch_after_skip" in transformer_patches: + patch = transformer_patches["input_block_patch_after_skip"] + for p in patch: + h = p(h, transformer_options) transformer_options["block"] = ("middle", 0) h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 7f5ed45fee2..a3cffc3be9d 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -99,6 +99,9 @@ def set_model_attn2_output_patch(self, patch): def set_model_input_block_patch(self, patch): self.set_model_patch(patch, "input_block_patch") + def set_model_input_block_patch_after_skip(self, patch): + self.set_model_patch(patch, "input_block_patch_after_skip") + def set_model_output_block_patch(self, patch): self.set_model_patch(patch, "output_block_patch") diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index f1b2d3ff2c5..8850d094891 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -8,13 +8,14 @@ def INPUT_TYPES(s): "downscale_factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 9.0, "step": 0.001}), "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}), + "downscale_after_skip": ("BOOLEAN", {"default": True}), }} RETURN_TYPES = ("MODEL",) FUNCTION = "patch" CATEGORY = "_for_testing" - def 
patch(self, model, block_number, downscale_factor, start_percent, end_percent): + def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip): sigma_start = model.model.model_sampling.percent_to_sigma(start_percent).item() sigma_end = model.model.model_sampling.percent_to_sigma(end_percent).item() @@ -31,7 +32,10 @@ def output_block_patch(h, hsp, transformer_options): return h, hsp m = model.clone() - m.set_model_input_block_patch(input_block_patch) + if downscale_after_skip: + m.set_model_input_block_patch_after_skip(input_block_patch) + else: + m.set_model_input_block_patch(input_block_patch) m.set_model_output_block_patch(output_block_patch) return (m, ) From 107e78b1cb079f652408bece8b0045927dc9f1fd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 16 Nov 2023 23:12:55 -0500 Subject: [PATCH 204/420] Add support for loading SSD1B diffusers unet version. Improve diffusers model detection. --- comfy/model_detection.py | 76 +++++++++++++++++++++++----------------- 1 file changed, 44 insertions(+), 32 deletions(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 4f4e0b3b7f0..d65d91e7cb5 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -186,17 +186,24 @@ def convert_config(unet_config): def unet_config_from_diffusers_unet(state_dict, dtype): match = {} - attention_resolutions = [] + transformer_depth = [] attn_res = 1 - for i in range(5): - k = "down_blocks.{}.attentions.1.transformer_blocks.0.attn2.to_k.weight".format(i) - if k in state_dict: - match["context_dim"] = state_dict[k].shape[1] - attention_resolutions.append(attn_res) + down_blocks = count_blocks(state_dict, "down_blocks.{}") + for i in range(down_blocks): + attn_blocks = count_blocks(state_dict, "down_blocks.{}.attentions.".format(i) + '{}') + for ab in range(attn_blocks): + transformer_count = count_blocks(state_dict, "down_blocks.{}.attentions.{}.transformer_blocks.".format(i, ab) + '{}') + transformer_depth.append(transformer_count) + if transformer_count > 0: + match["context_dim"] = state_dict["down_blocks.{}.attentions.{}.transformer_blocks.0.attn2.to_k.weight".format(i, ab)].shape[1] + attn_res *= 2 + if attn_blocks == 0: + transformer_depth.append(0) + transformer_depth.append(0) - match["attention_resolutions"] = attention_resolutions + match["transformer_depth"] = transformer_depth match["model_channels"] = state_dict["conv_in.weight"].shape[0] match["in_channels"] = state_dict["conv_in.weight"].shape[1] @@ -208,50 +215,55 @@ def unet_config_from_diffusers_unet(state_dict, dtype): SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2560, 'dtype': dtype, 'in_channels': 4, 
'model_channels': 384, - 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 4, 4, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 4, 'use_linear_in_transformer': True, 'context_dim': 1280, "num_head_channels": 64} + 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [0, 0, 4, 4, 4, 4, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 4, + 'use_linear_in_transformer': True, 'context_dim': 1280, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0]} SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, - 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024, "num_head_channels": 64} + 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], + 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, + 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2048, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024, "num_head_channels": 64} + 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 1536, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 1024} + 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} - SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': 2, - 'attention_resolutions': [1, 2, 4], 'transformer_depth': [1, 1, 1, 0], 'channel_mult': [1, 2, 4, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, "num_heads": 8} + SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'adm_in_channels': None, + 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 
'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], + 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, 'num_heads': 8, + 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} SDXL_mid_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [4], 'transformer_depth': [0, 0, 1], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 1, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 1, + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 1, 1, 1]} SDXL_small_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [], 'transformer_depth': [0, 0, 0], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 0, 'use_linear_in_transformer': True, "num_head_channels": 64, 'context_dim': 1} + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 0, 0], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 0, + 'use_linear_in_transformer': True, 'num_head_channels': 64, 'context_dim': 1, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 0, 0, 0]} SDXL_diffusers_inpaint = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, - 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320, - 'num_res_blocks': 2, 'attention_resolutions': [2, 4], 'transformer_depth': [0, 2, 10], 'channel_mult': [1, 2, 4], - 'transformer_depth_middle': 10, 'use_linear_in_transformer': True, 'context_dim': 2048, "num_head_channels": 64} + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} + + SSD_1B = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, + 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, + 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 4, 4], 'transformer_depth_output': [0, 0, 0, 1, 1, 2, 10, 4, 4], + 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64} - supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint] + supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, 
SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B] for unet_config in supported_models: matches = True From 0cf4e8693945d68000e37fe291f877eff9ef0aaa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 17 Nov 2023 02:56:59 -0500 Subject: [PATCH 205/420] Add some command line arguments to store text encoder weights in fp8. Pytorch supports two variants of fp8: --fp8_e4m3fn-text-enc (the one that seems to give better results) --fp8_e5m2-text-enc --- comfy/cli_args.py | 7 +++++++ comfy/model_management.py | 15 +++++++++++++++ comfy/sd.py | 5 +---- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index e79b89c0f0d..72fce10872f 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -62,6 +62,13 @@ def __call__(self, parser, namespace, values, option_string=None): fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.") fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.") +fpte_group = parser.add_mutually_exclusive_group() +fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true", help="Store text encoder weights in fp8 (e4m3fn variant).") +fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store text encoder weights in fp8 (e5m2 variant).") +fpte_group.add_argument("--fp16-text-enc", action="store_true", help="Store text encoder weights in fp16.") +fpte_group.add_argument("--fp32-text-enc", action="store_true", help="Store text encoder weights in fp32.") + + parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE", const=-1, help="Use torch-directml.") parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize when loading models with Intel GPUs.") diff --git a/comfy/model_management.py b/comfy/model_management.py index be4301aa4e3..d4acd8950ca 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -482,6 +482,21 @@ def text_encoder_device(): else: return torch.device("cpu") +def text_encoder_dtype(device=None): + if args.fp8_e4m3fn_text_enc: + return torch.float8_e4m3fn + elif args.fp8_e5m2_text_enc: + return torch.float8_e5m2 + elif args.fp16_text_enc: + return torch.float16 + elif args.fp32_text_enc: + return torch.float32 + + if should_use_fp16(device, prioritize_performance=False): + return torch.float16 + else: + return torch.float32 + def vae_device(): return get_torch_device() diff --git a/comfy/sd.py b/comfy/sd.py index 65d94f46ecc..c3cc8e72080 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -95,10 +95,7 @@ def __init__(self, target=None, embedding_directory=None, no_init=False): load_device = model_management.text_encoder_device() offload_device = model_management.text_encoder_offload_device() params['device'] = offload_device - if model_management.should_use_fp16(load_device, prioritize_performance=False): - params['dtype'] = torch.float16 - else: - params['dtype'] = torch.float32 + params['dtype'] = model_management.text_encoder_dtype(load_device) self.cond_stage_model = clip(**(params)) From 8a451234b3090db488fbee9740a5f6be2f989253 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 18 Nov 2023 04:44:17 -0500 Subject: [PATCH 206/420] Add ImageCrop node. 
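The crop itself is a plain slice over ComfyUI's [batch, height, width,
channels] image tensors, with x and y clamped so the slice is never empty;
a minimal sketch of the behavior (input values hypothetical):

    import torch

    image = torch.rand(1, 512, 512, 3)  # [B, H, W, C]
    x, y, width, height = 64, 64, 256, 256
    x = min(x, image.shape[2] - 1)      # clamp against the right edge
    y = min(y, image.shape[1] - 1)      # clamp against the bottom edge
    cropped = image[:, y:y + height, x:x + width, :]
    print(cropped.shape)                # torch.Size([1, 256, 256, 3])
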
--- comfy_extras/nodes_images.py | 29 +++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 30 insertions(+) create mode 100644 comfy_extras/nodes_images.py diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py new file mode 100644 index 00000000000..2b8e93001af --- /dev/null +++ b/comfy_extras/nodes_images.py @@ -0,0 +1,29 @@ +import nodes +MAX_RESOLUTION = nodes.MAX_RESOLUTION + +class ImageCrop: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": ("IMAGE",), + "width": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + "height": ("INT", {"default": 512, "min": 1, "max": MAX_RESOLUTION, "step": 1}), + "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "crop" + + CATEGORY = "image/transform" + + def crop(self, image, width, height, x, y): + x = min(x, image.shape[2] - 1) + y = min(y, image.shape[1] - 1) + to_x = width + x + to_y = height + y + img = image[:,y:to_y, x:to_x, :] + return (img,) + + +NODE_CLASS_MAPPINGS = { + "ImageCrop": ImageCrop, +} diff --git a/nodes.py b/nodes.py index f9d2d7f6c8b..2adc5e07371 100644 --- a/nodes.py +++ b/nodes.py @@ -1800,6 +1800,7 @@ def init_custom_nodes(): "nodes_hypertile.py", "nodes_model_advanced.py", "nodes_model_downscale.py", + "nodes_images.py", ] for node_file in extras_files: From d9d8702d8dd2337c64610633f5df2dcd402379a8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 18 Nov 2023 23:20:29 -0500 Subject: [PATCH 207/420] percent_to_sigma now returns a float instead of a tensor. --- comfy/model_sampling.py | 6 +++--- comfy_extras/nodes_model_advanced.py | 6 +++--- comfy_extras/nodes_model_downscale.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index d5b1642ef3a..37a3ac725c6 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -77,9 +77,9 @@ def sigma(self, timestep): def percent_to_sigma(self, percent): if percent <= 0.0: - return torch.tensor(999999999.9) + return 999999999.9 if percent >= 1.0: - return torch.tensor(0.0) + return 0.0 percent = 1.0 - percent - return self.sigma(torch.tensor(percent * 999.0)) + return self.sigma(torch.tensor(percent * 999.0)).item() diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index c8c4b4a1e70..0f4ddd9c340 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -67,11 +67,11 @@ def sigma(self, timestep): def percent_to_sigma(self, percent): if percent <= 0.0: - return torch.tensor(999999999.9) + return 999999999.9 if percent >= 1.0: - return torch.tensor(0.0) + return 0.0 percent = 1.0 - percent - return self.sigma(torch.tensor(percent * 999.0)) + return self.sigma(torch.tensor(percent * 999.0)).item() def rescale_zero_terminal_snr_sigmas(sigmas): diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index 8850d094891..f65ef05e18b 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -16,8 +16,8 @@ def INPUT_TYPES(s): CATEGORY = "_for_testing" def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip): - sigma_start = model.model.model_sampling.percent_to_sigma(start_percent).item() - sigma_end = model.model.model_sampling.percent_to_sigma(end_percent).item() + sigma_start = 
model.model.model_sampling.percent_to_sigma(start_percent) + sigma_end = model.model.model_sampling.percent_to_sigma(end_percent) def input_block_patch(h, transformer_options): if transformer_options["block"][1] == block_number: From dba4f3b4fce575994ed718ac31888620e8d6e733 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 19 Nov 2023 06:09:01 -0500 Subject: [PATCH 208/420] Add a RepeatImageBatch node. --- comfy_extras/nodes_images.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 2b8e93001af..8cb322327b0 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -23,7 +23,22 @@ def crop(self, image, width, height, x, y): img = image[:,y:to_y, x:to_x, :] return (img,) +class RepeatImageBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { "image": ("IMAGE",), + "amount": ("INT", {"default": 1, "min": 1, "max": 64}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "repeat" + + CATEGORY = "image/batch" + + def repeat(self, image, amount): + s = image.repeat((amount, 1,1,1)) + return (s,) NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, + "RepeatImageBatch": RepeatImageBatch, } From 31c5ea7b2c79f36d3ebc729acf946ba47b4e5785 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 03:55:51 -0500 Subject: [PATCH 209/420] Add LatentInterpolate to interpolate between latents. --- comfy_extras/nodes_latent.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index 001de39fceb..cedf39d6346 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -1,4 +1,5 @@ import comfy.utils +import torch def reshape_latent_to(target_shape, latent): if latent.shape[1:] != target_shape[1:]: @@ -67,8 +68,43 @@ def op(self, samples, multiplier): samples_out["samples"] = s1 * multiplier return (samples_out,) +class LatentInterpolate: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples1": ("LATENT",), + "samples2": ("LATENT",), + "ratio": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples1, samples2, ratio): + samples_out = samples1.copy() + + s1 = samples1["samples"] + s2 = samples2["samples"] + + s2 = reshape_latent_to(s1.shape, s2) + + m1 = torch.linalg.vector_norm(s1, dim=(1)) + m2 = torch.linalg.vector_norm(s2, dim=(1)) + + s1 = torch.nan_to_num(s1 / m1) + s2 = torch.nan_to_num(s2 / m2) + + t = (s1 * ratio + s2 * (1.0 - ratio)) + mt = torch.linalg.vector_norm(t, dim=(1)) + st = torch.nan_to_num(t / mt) + + samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio)) + return (samples_out,) + NODE_CLASS_MAPPINGS = { "LatentAdd": LatentAdd, "LatentSubtract": LatentSubtract, "LatentMultiply": LatentMultiply, + "LatentInterpolate": LatentInterpolate, } From a03dde190ede39675736e746c3045ecfc4baa79b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 16:38:39 -0500 Subject: [PATCH 210/420] Cap maximum history size at 10000. Delete oldest entry when reached. 
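The eviction needs no extra bookkeeping because Python dicts (3.7+) preserve
insertion order, so the first key yielded by iteration is the oldest prompt
recorded; a minimal sketch:

    history = {}
    for i in range(3):
        history["prompt-%d" % i] = {"outputs": {}}

    history.pop(next(iter(history)))  # evicts "prompt-0", the oldest entry
    print(list(history))              # ['prompt-1', 'prompt-2']
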
--- execution.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/execution.py b/execution.py index 918c2bc5cc3..9a2ca5b9d04 100644 --- a/execution.py +++ b/execution.py @@ -681,6 +681,7 @@ def validate_prompt(prompt): return (True, None, list(good_outputs), node_errors) +MAXIMUM_HISTORY_SIZE = 10000 class PromptQueue: def __init__(self, server): @@ -713,6 +714,8 @@ def get(self): def task_done(self, item_id, outputs): with self.mutex: prompt = self.currently_running.pop(item_id) + if len(self.history) > MAXIMUM_HISTORY_SIZE: + self.history.pop(next(iter(self.history))) self.history[prompt[1]] = { "prompt": prompt, "outputs": {} } for o in outputs: self.history[prompt[1]]["outputs"][o] = outputs[o] From 2dd5b4dd78fc0a30f3d5baa0b99a6b10f002d917 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 16:51:41 -0500 Subject: [PATCH 211/420] Only show last 200 elements in the UI history tab. --- execution.py | 14 ++++++++++++-- server.py | 5 ++++- web/scripts/api.js | 2 +- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/execution.py b/execution.py index 9a2ca5b9d04..bca48a785c2 100644 --- a/execution.py +++ b/execution.py @@ -750,10 +750,20 @@ def delete_queue_item(self, function): return True return False - def get_history(self, prompt_id=None): + def get_history(self, prompt_id=None, max_items=None, offset=-1): with self.mutex: if prompt_id is None: - return copy.deepcopy(self.history) + out = {} + i = 0 + if offset < 0 and max_items is not None: + offset = len(self.history) - max_items + for k in self.history: + if i >= offset: + out[k] = self.history[k] + if max_items is not None and len(out) >= max_items: + break + i += 1 + return out elif prompt_id in self.history: return {prompt_id: copy.deepcopy(self.history[prompt_id])} else: diff --git a/server.py b/server.py index 11bd2a0fb44..1a8e92b8f96 100644 --- a/server.py +++ b/server.py @@ -431,7 +431,10 @@ async def get_object_info_node(request): @routes.get("/history") async def get_history(request): - return web.json_response(self.prompt_queue.get_history()) + max_items = request.rel_url.query.get("max_items", None) + if max_items is not None: + max_items = int(max_items) + return web.json_response(self.prompt_queue.get_history(max_items=max_items)) @routes.get("/history/{prompt_id}") async def get_history(request): diff --git a/web/scripts/api.js b/web/scripts/api.js index b1d245d73ff..de56b23108b 100644 --- a/web/scripts/api.js +++ b/web/scripts/api.js @@ -256,7 +256,7 @@ class ComfyApi extends EventTarget { */ async getHistory() { try { - const res = await this.fetchApi("/history"); + const res = await this.fetchApi("/history?max_items=200"); return { History: Object.values(await res.json()) }; } catch (error) { console.error(error); From ce67dcbcdabe2edf1497e37ecf1b6f976a3ecdf6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 20 Nov 2023 22:27:36 -0500 Subject: [PATCH 212/420] Make it easy for models to process the unet state dict on load. 
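The base hook is an identity function, so a model config only overrides it
when its checkpoints need key fixups before reaching the UNet; a hedged
sketch of such an override (the class name and key prefix are hypothetical,
not from the repo):

    class VendorModelConfig:
        def process_unet_state_dict(self, state_dict):
            # e.g. strip a vendor-specific prefix from every key on load
            return {k.replace("vendor.", "", 1): v for k, v in state_dict.items()}
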
--- comfy/model_base.py | 1 + comfy/supported_models_base.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/comfy/model_base.py b/comfy/model_base.py index 37bf24bb8c6..772e2693493 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -121,6 +121,7 @@ def load_model_weights(self, sd, unet_prefix=""): if k.startswith(unet_prefix): to_load[k[len(unet_prefix):]] = sd.pop(k) + to_load = self.model_config.process_unet_state_dict(to_load) m, u = self.diffusion_model.load_state_dict(to_load, strict=False) if len(m) > 0: print("unet missing:", m) diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 88a1d7fde49..6dfae034303 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -53,6 +53,9 @@ def get_model(self, state_dict, prefix="", device=None): def process_clip_state_dict(self, state_dict): return state_dict + def process_unet_state_dict(self, state_dict): + return state_dict + def process_clip_state_dict_for_saving(self, state_dict): replace_prefix = {"": "cond_stage_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) From 6ff06fa7960524749d8e584100a0e50594485f29 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 21 Nov 2023 06:33:58 +0000 Subject: [PATCH 213/420] Animated image output support (#2008) * Refactor multiline widget into generic DOM widget * wip webp preview * webp support * fix check * fix sizing * show image when zoomed out * Swap webp checkto generic animated image flag * remove duplicate * Fix falsy check --- web/scripts/app.js | 78 +++++---- web/scripts/domWidget.js | 312 +++++++++++++++++++++++++++++++++ web/scripts/ui/imagePreview.js | 97 ++++++++++ web/scripts/widgets.js | 168 ++---------------- web/style.css | 15 ++ 5 files changed, 483 insertions(+), 187 deletions(-) create mode 100644 web/scripts/domWidget.js create mode 100644 web/scripts/ui/imagePreview.js diff --git a/web/scripts/app.js b/web/scripts/app.js index 4507527f686..601e486e6e4 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -4,7 +4,10 @@ import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; import { getPngMetadata, getWebpMetadata, importA1111, getLatentMetadata } from "./pnginfo.js"; +import { addDomClippingSetting } from "./domWidget.js"; +import { createImageHost, calculateImageGrid } from "./ui/imagePreview.js" +export const ANIM_PREVIEW_WIDGET = "$$comfy_animation_preview" function sanitizeNodeName(string) { let entityMap = { @@ -405,7 +408,9 @@ export class ComfyApp { return shiftY; } - node.prototype.setSizeForImage = function () { + node.prototype.setSizeForImage = function (force) { + if(!force && this.animatedImages) return; + if (this.inputHeight) { this.setSize(this.size); return; @@ -422,13 +427,20 @@ export class ComfyApp { let imagesChanged = false const output = app.nodeOutputs[this.id + ""]; - if (output && output.images) { + if (output?.images) { + this.animatedImages = output?.animated?.find(Boolean); if (this.images !== output.images) { this.images = output.images; imagesChanged = true; - imgURLs = imgURLs.concat(output.images.map(params => { - return api.apiURL("/view?" + new URLSearchParams(params).toString() + app.getPreviewFormatParam()); - })) + imgURLs = imgURLs.concat( + output.images.map((params) => { + return api.apiURL( + "/view?" + + new URLSearchParams(params).toString() + + (this.animatedImages ? 
"" : app.getPreviewFormatParam()) + ); + }) + ); } } @@ -507,7 +519,34 @@ export class ComfyApp { return true; } - if (this.imgs && this.imgs.length) { + if (this.imgs?.length) { + const widgetIdx = this.widgets?.findIndex((w) => w.name === ANIM_PREVIEW_WIDGET); + + if(this.animatedImages) { + // Instead of using the canvas we'll use a IMG + if(widgetIdx > -1) { + // Replace content + const widget = this.widgets[widgetIdx]; + widget.options.host.updateImages(this.imgs); + } else { + const host = createImageHost(this); + this.setSizeForImage(true); + const widget = this.addDOMWidget(ANIM_PREVIEW_WIDGET, "img", host.el, { + host, + getHeight: host.getHeight, + onDraw: host.onDraw, + hideOnZoom: false + }); + widget.serializeValue = () => undefined; + widget.options.host.updateImages(this.imgs); + } + return; + } + + if (widgetIdx > -1) { + this.widgets.splice(widgetIdx, 1); + } + const canvas = app.graph.list_of_graphcanvas[0]; const mouse = canvas.graph_mouse; if (!canvas.pointer_is_down && this.pointerDown) { @@ -547,31 +586,7 @@ export class ComfyApp { } else { cell_padding = 0; - let best = 0; - let w = this.imgs[0].naturalWidth; - let h = this.imgs[0].naturalHeight; - - // compact style - for (let c = 1; c <= numImages; c++) { - const rows = Math.ceil(numImages / c); - const cW = dw / c; - const cH = dh / rows; - const scaleX = cW / w; - const scaleY = cH / h; - - const scale = Math.min(scaleX, scaleY, 1); - const imageW = w * scale; - const imageH = h * scale; - const area = imageW * imageH * numImages; - - if (area > best) { - best = area; - cellWidth = imageW; - cellHeight = imageH; - cols = c; - shiftX = c * ((cW - imageW) / 2); - } - } + ({ cellWidth, cellHeight, cols, shiftX } = calculateImageGrid(this.imgs, dw, dh)); } let anyHovered = false; @@ -1272,6 +1287,7 @@ export class ComfyApp { canvasEl.tabIndex = "1"; document.body.prepend(canvasEl); + addDomClippingSetting(); this.#addProcessMouseHandler(); this.#addProcessKeyHandler(); this.#addConfigureHandler(); diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js new file mode 100644 index 00000000000..16f4e192eea --- /dev/null +++ b/web/scripts/domWidget.js @@ -0,0 +1,312 @@ +import { app, ANIM_PREVIEW_WIDGET } from "./app.js"; + +const SIZE = Symbol(); + +function intersect(a, b) { + const x = Math.max(a.x, b.x); + const num1 = Math.min(a.x + a.width, b.x + b.width); + const y = Math.max(a.y, b.y); + const num2 = Math.min(a.y + a.height, b.y + b.height); + if (num1 >= x && num2 >= y) return [x, y, num1 - x, num2 - y]; + else return null; +} + +function getClipPath(node, element, elRect) { + const selectedNode = Object.values(app.canvas.selected_nodes)[0]; + if (selectedNode && selectedNode !== node) { + const MARGIN = 7; + const scale = app.canvas.ds.scale; + + const intersection = intersect( + { x: elRect.x / scale, y: elRect.y / scale, width: elRect.width / scale, height: elRect.height / scale }, + { + x: selectedNode.pos[0] + app.canvas.ds.offset[0] - MARGIN, + y: selectedNode.pos[1] + app.canvas.ds.offset[1] - LiteGraph.NODE_TITLE_HEIGHT - MARGIN, + width: selectedNode.size[0] + MARGIN + MARGIN, + height: selectedNode.size[1] + LiteGraph.NODE_TITLE_HEIGHT + MARGIN + MARGIN, + } + ); + + if (!intersection) { + return ""; + } + + const widgetRect = element.getBoundingClientRect(); + const clipX = intersection[0] - widgetRect.x / scale + "px"; + const clipY = intersection[1] - widgetRect.y / scale + "px"; + const clipWidth = intersection[2] + "px"; + const clipHeight = intersection[3] + "px"; + const path = 
`polygon(0% 0%, 0% 100%, ${clipX} 100%, ${clipX} ${clipY}, calc(${clipX} + ${clipWidth}) ${clipY}, calc(${clipX} + ${clipWidth}) calc(${clipY} + ${clipHeight}), ${clipX} calc(${clipY} + ${clipHeight}), ${clipX} 100%, 100% 100%, 100% 0%)`; + return path; + } + return ""; +} + +function computeSize(size) { + if (this.widgets?.[0].last_y == null) return; + + let y = this.widgets[0].last_y; + let freeSpace = size[1] - y; + + let widgetHeight = 0; + let dom = []; + for (const w of this.widgets) { + if (w.type === "converted-widget") { + // Ignore + delete w.computedHeight; + } else if (w.computeSize) { + widgetHeight += w.computeSize()[1] + 4; + } else if (w.element) { + // Extract DOM widget size info + const styles = getComputedStyle(w.element); + let minHeight = w.options.getMinHeight?.() ?? parseInt(styles.getPropertyValue("--comfy-widget-min-height")); + let maxHeight = w.options.getMaxHeight?.() ?? parseInt(styles.getPropertyValue("--comfy-widget-max-height")); + + let prefHeight = w.options.getHeight?.() ?? styles.getPropertyValue("--comfy-widget-height"); + if (prefHeight.endsWith?.("%")) { + prefHeight = size[1] * (parseFloat(prefHeight.substring(0, prefHeight.length - 1)) / 100); + } else { + prefHeight = parseInt(prefHeight); + if (isNaN(minHeight)) { + minHeight = prefHeight; + } + } + if (isNaN(minHeight)) { + minHeight = 50; + } + if (!isNaN(maxHeight)) { + if (!isNaN(prefHeight)) { + prefHeight = Math.min(prefHeight, maxHeight); + } else { + prefHeight = maxHeight; + } + } + dom.push({ + minHeight, + prefHeight, + w, + }); + } else { + widgetHeight += LiteGraph.NODE_WIDGET_HEIGHT + 4; + } + } + + freeSpace -= widgetHeight; + + // Calculate sizes with all widgets at their min height + const prefGrow = []; // Nodes that want to grow to their prefd size + const canGrow = []; // Nodes that can grow to auto size + let growBy = 0; + for (const d of dom) { + freeSpace -= d.minHeight; + if (isNaN(d.prefHeight)) { + canGrow.push(d); + d.w.computedHeight = d.minHeight; + } else { + const diff = d.prefHeight - d.minHeight; + if (diff > 0) { + prefGrow.push(d); + growBy += diff; + d.diff = diff; + } else { + d.w.computedHeight = d.minHeight; + } + } + } + + if (this.imgs && !this.widgets.find((w) => w.name === ANIM_PREVIEW_WIDGET)) { + // Allocate space for image + freeSpace -= 220; + } + + if (freeSpace < 0) { + // Not enough space for all widgets so we need to grow + size[1] -= freeSpace; + this.graph.setDirtyCanvas(true); + } else { + // Share the space between each + const growDiff = freeSpace - growBy; + if (growDiff > 0) { + // All pref sizes can be fulfilled + freeSpace = growDiff; + for (const d of prefGrow) { + d.w.computedHeight = d.prefHeight; + } + } else { + // We need to grow evenly + const shared = -growDiff / prefGrow.length; + for (const d of prefGrow) { + d.w.computedHeight = d.prefHeight - shared; + } + freeSpace = 0; + } + + if (freeSpace > 0 && canGrow.length) { + // Grow any that are auto height + const shared = freeSpace / canGrow.length; + for (const d of canGrow) { + d.w.computedHeight += shared; + } + } + } + + // Position each of the widgets + for (const w of this.widgets) { + w.y = y; + if (w.computedHeight) { + y += w.computedHeight; + } else if (w.computeSize) { + y += w.computeSize()[1] + 4; + } else { + y += LiteGraph.NODE_WIDGET_HEIGHT + 4; + } + } +} + +// Override the compute visible nodes function to allow us to hide/show DOM elements when the node goes offscreen +const elementWidgets = new Set(); +const computeVisibleNodes = 
LGraphCanvas.prototype.computeVisibleNodes; +LGraphCanvas.prototype.computeVisibleNodes = function () { + const visibleNodes = computeVisibleNodes.apply(this, arguments); + for (const node of app.graph._nodes) { + if (elementWidgets.has(node)) { + const hidden = visibleNodes.indexOf(node) === -1; + for (const w of node.widgets) { + if (w.element) { + w.element.hidden = hidden; + if (hidden) { + w.options.onHide?.(w); + } + } + } + } + } + + return visibleNodes; +}; + +let enableDomClipping = true; + +export function addDomClippingSetting() { + app.ui.settings.addSetting({ + id: "Comfy.DOMClippingEnabled", + name: "Enable DOM element clipping (enabling may reduce performance)", + type: "boolean", + defaultValue: enableDomClipping, + onChange(value) { + console.log("enableDomClipping", enableDomClipping); + enableDomClipping = !!value; + }, + }); +} + +LGraphNode.prototype.addDOMWidget = function (name, type, element, options) { + options = { hideOnZoom: true, selectOn: ["focus", "click"], ...options }; + + if (!element.parentElement) { + document.body.append(element); + } + + let mouseDownHandler; + if (element.blur) { + mouseDownHandler = (event) => { + if (!element.contains(event.target)) { + element.blur(); + } + }; + document.addEventListener("mousedown", mouseDownHandler); + } + + const widget = { + type, + name, + get value() { + return options.getValue?.() ?? undefined; + }, + set value(v) { + options.setValue?.(v); + widget.callback?.(widget.value); + }, + draw: function (ctx, node, widgetWidth, y, widgetHeight) { + if (widget.computedHeight == null) { + computeSize.call(node, node.size); + } + + const hidden = + (!!options.hideOnZoom && app.canvas.ds.scale < 0.5) || + widget.computedHeight <= 0 || + widget.type === "converted-widget"; + element.hidden = hidden; + element.style.display = hidden ? "none" : null; + if (hidden) { + widget.options.onHide?.(widget); + return; + } + + const margin = 10; + const elRect = ctx.canvas.getBoundingClientRect(); + const transform = new DOMMatrix() + .scaleSelf(elRect.width / ctx.canvas.width, elRect.height / ctx.canvas.height) + .multiplySelf(ctx.getTransform()) + .translateSelf(margin, margin + y); + + const scale = new DOMMatrix().scaleSelf(transform.a, transform.d); + + Object.assign(element.style, { + transformOrigin: "0 0", + transform: scale, + left: `${transform.a + transform.e}px`, + top: `${transform.d + transform.f}px`, + width: `${widgetWidth - margin * 2}px`, + height: `${(widget.computedHeight ?? 
50) - margin * 2}px`, + position: "absolute", + zIndex: app.graph._nodes.indexOf(node), + }); + + if (enableDomClipping) { + element.style.clipPath = getClipPath(node, element, elRect); + element.style.willChange = "clip-path"; + } + + this.options.onDraw?.(widget); + }, + element, + options, + onRemove() { + if (mouseDownHandler) { + document.removeEventListener("mousedown", mouseDownHandler); + } + element.remove(); + }, + }; + + for (const evt of options.selectOn) { + element.addEventListener(evt, () => { + app.canvas.selectNode(this); + app.canvas.bringToFront(this); + }); + } + + this.addCustomWidget(widget); + elementWidgets.add(this); + + const onRemoved = this.onRemoved; + this.onRemoved = function () { + element.remove(); + elementWidgets.delete(this); + onRemoved?.apply(this, arguments); + }; + + if (!this[SIZE]) { + this[SIZE] = true; + const onResize = this.onResize; + this.onResize = function (size) { + options.beforeResize?.call(widget, this); + computeSize.call(this, size); + onResize?.apply(this, arguments); + options.afterResize?.call(widget, this); + }; + } + + return widget; +}; diff --git a/web/scripts/ui/imagePreview.js b/web/scripts/ui/imagePreview.js new file mode 100644 index 00000000000..2a7f66b8f3b --- /dev/null +++ b/web/scripts/ui/imagePreview.js @@ -0,0 +1,97 @@ +import { $el } from "../ui.js"; + +export function calculateImageGrid(imgs, dw, dh) { + let best = 0; + let w = imgs[0].naturalWidth; + let h = imgs[0].naturalHeight; + const numImages = imgs.length; + + let cellWidth, cellHeight, cols, rows, shiftX; + // compact style + for (let c = 1; c <= numImages; c++) { + const r = Math.ceil(numImages / c); + const cW = dw / c; + const cH = dh / r; + const scaleX = cW / w; + const scaleY = cH / h; + + const scale = Math.min(scaleX, scaleY, 1); + const imageW = w * scale; + const imageH = h * scale; + const area = imageW * imageH * numImages; + + if (area > best) { + best = area; + cellWidth = imageW; + cellHeight = imageH; + cols = c; + rows = r; + shiftX = c * ((cW - imageW) / 2); + } + } + + return { cellWidth, cellHeight, cols, rows, shiftX }; +} + +export function createImageHost(node) { + const el = $el("div.comfy-img-preview"); + let currentImgs; + let first = true; + + function updateSize() { + let w = null; + let h = null; + + if (currentImgs) { + let elH = el.clientHeight; + if (first) { + first = false; + // On first run, if we are small then grow a bit + if (elH < 190) { + elH = 190; + } + el.style.setProperty("--comfy-widget-min-height", elH); + } else { + el.style.setProperty("--comfy-widget-min-height", null); + } + + const nw = node.size[0]; + ({ cellWidth: w, cellHeight: h } = calculateImageGrid(currentImgs, nw - 20, elH)); + w += "px"; + h += "px"; + + el.style.setProperty("--comfy-img-preview-width", w); + el.style.setProperty("--comfy-img-preview-height", h); + } + } + return { + el, + updateImages(imgs) { + if (imgs !== currentImgs) { + if (currentImgs == null) { + requestAnimationFrame(() => { + updateSize(); + }); + } + el.replaceChildren(...imgs); + currentImgs = imgs; + node.onResize(node.size); + node.graph.setDirtyCanvas(true, true); + } + }, + getHeight() { + updateSize(); + }, + onDraw() { + // Element from point uses a hittest find elements so we need to toggle pointer events + el.style.pointerEvents = "all"; + const over = document.elementFromPoint(app.canvas.mouse[0], app.canvas.mouse[1]); + el.style.pointerEvents = "none"; + + if(!over) return; + // Set the overIndex so Open Image etc work + const idx = currentImgs.indexOf(over); + 
node.overIndex = idx; + }, + }; +} diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 36bc7ff7fd7..ccddc0bc44b 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -1,4 +1,5 @@ import { api } from "./api.js" +import "./domWidget.js"; function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { let defaultVal = inputData[1]["default"]; @@ -97,166 +98,21 @@ function seedWidget(node, inputName, inputData, app) { seed.widget.linkedWidgets = [seedControl]; return seed; } - -const MultilineSymbol = Symbol(); -const MultilineResizeSymbol = Symbol(); - function addMultilineWidget(node, name, opts, app) { - const MIN_SIZE = 50; - - function computeSize(size) { - if (node.widgets[0].last_y == null) return; - - let y = node.widgets[0].last_y; - let freeSpace = size[1] - y; - - // Compute the height of all non customtext widgets - let widgetHeight = 0; - const multi = []; - for (let i = 0; i < node.widgets.length; i++) { - const w = node.widgets[i]; - if (w.type === "customtext") { - multi.push(w); - } else { - if (w.computeSize) { - widgetHeight += w.computeSize()[1] + 4; - } else { - widgetHeight += LiteGraph.NODE_WIDGET_HEIGHT + 4; - } - } - } - - // See how large each text input can be - freeSpace -= widgetHeight; - freeSpace /= multi.length + (!!node.imgs?.length); - - if (freeSpace < MIN_SIZE) { - // There isnt enough space for all the widgets, increase the size of the node - freeSpace = MIN_SIZE; - node.size[1] = y + widgetHeight + freeSpace * (multi.length + (!!node.imgs?.length)); - node.graph.setDirtyCanvas(true); - } - - // Position each of the widgets - for (const w of node.widgets) { - w.y = y; - if (w.type === "customtext") { - y += freeSpace; - w.computedHeight = freeSpace - multi.length*4; - } else if (w.computeSize) { - y += w.computeSize()[1] + 4; - } else { - y += LiteGraph.NODE_WIDGET_HEIGHT + 4; - } - } - - node.inputHeight = freeSpace; - } - - const widget = { - type: "customtext", - name, - get value() { - return this.inputEl.value; + const inputEl = document.createElement("textarea"); + inputEl.className = "comfy-multiline-input"; + inputEl.value = opts.defaultVal; + inputEl.placeholder = opts.placeholder || ""; + + const widget = node.addDOMWidget(name, "customtext", inputEl, { + getValue() { + return inputEl.value; }, - set value(x) { - this.inputEl.value = x; - }, - draw: function (ctx, _, widgetWidth, y, widgetHeight) { - if (!this.parent.inputHeight) { - // If we are initially offscreen when created we wont have received a resize event - // Calculate it here instead - computeSize(node.size); - } - const visible = app.canvas.ds.scale > 0.5 && this.type === "customtext"; - const margin = 10; - const elRect = ctx.canvas.getBoundingClientRect(); - const transform = new DOMMatrix() - .scaleSelf(elRect.width / ctx.canvas.width, elRect.height / ctx.canvas.height) - .multiplySelf(ctx.getTransform()) - .translateSelf(margin, margin + y); - - const scale = new DOMMatrix().scaleSelf(transform.a, transform.d) - Object.assign(this.inputEl.style, { - transformOrigin: "0 0", - transform: scale, - left: `${transform.a + transform.e}px`, - top: `${transform.d + transform.f}px`, - width: `${widgetWidth - (margin * 2)}px`, - height: `${this.parent.inputHeight - (margin * 2)}px`, - position: "absolute", - background: (!node.color)?'':node.color, - color: (!node.color)?'':'white', - zIndex: app.graph._nodes.indexOf(node), - }); - this.inputEl.hidden = !visible; + setValue(v) { + inputEl.value = v; }, - }; - widget.inputEl = 
document.createElement("textarea"); - widget.inputEl.className = "comfy-multiline-input"; - widget.inputEl.value = opts.defaultVal; - widget.inputEl.placeholder = opts.placeholder || ""; - document.addEventListener("mousedown", function (event) { - if (!widget.inputEl.contains(event.target)) { - widget.inputEl.blur(); - } }); - widget.parent = node; - document.body.appendChild(widget.inputEl); - - node.addCustomWidget(widget); - - app.canvas.onDrawBackground = function () { - // Draw node isnt fired once the node is off the screen - // if it goes off screen quickly, the input may not be removed - // this shifts it off screen so it can be moved back if the node is visible. - for (let n in app.graph._nodes) { - n = graph._nodes[n]; - for (let w in n.widgets) { - let wid = n.widgets[w]; - if (Object.hasOwn(wid, "inputEl")) { - wid.inputEl.style.left = -8000 + "px"; - wid.inputEl.style.position = "absolute"; - } - } - } - }; - - node.onRemoved = function () { - // When removing this node we need to remove the input from the DOM - for (let y in this.widgets) { - if (this.widgets[y].inputEl) { - this.widgets[y].inputEl.remove(); - } - } - }; - - widget.onRemove = () => { - widget.inputEl?.remove(); - - // Restore original size handler if we are the last - if (!--node[MultilineSymbol]) { - node.onResize = node[MultilineResizeSymbol]; - delete node[MultilineSymbol]; - delete node[MultilineResizeSymbol]; - } - }; - - if (node[MultilineSymbol]) { - node[MultilineSymbol]++; - } else { - node[MultilineSymbol] = 1; - const onResize = (node[MultilineResizeSymbol] = node.onResize); - - node.onResize = function (size) { - computeSize(size); - - // Call original resizer handler - if (onResize) { - onResize.apply(this, arguments); - } - }; - } + widget.inputEl = inputEl; return { minWidth: 400, minHeight: 200, widget }; } diff --git a/web/style.css b/web/style.css index 692fa31d672..378fe0a48b9 100644 --- a/web/style.css +++ b/web/style.css @@ -409,6 +409,21 @@ dialog::backdrop { width: calc(100% - 10px); } +.comfy-img-preview { + pointer-events: none; + overflow: hidden; + display: flex; + flex-wrap: wrap; + align-content: flex-start; + justify-content: center; +} + +.comfy-img-preview img { + object-fit: contain; + width: var(--comfy-img-preview-width); + height: var(--comfy-img-preview-height); +} + /* Search box */ .litegraph.litesearchbox { From 89e31abc46df00d10d48b8a4e36256fefd5973ed Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 21 Nov 2023 17:54:01 +0000 Subject: [PATCH 214/420] Fix clipping of collapsed nodes --- web/scripts/domWidget.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 16f4e192eea..2f73e573e13 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -17,13 +17,14 @@ function getClipPath(node, element, elRect) { const MARGIN = 7; const scale = app.canvas.ds.scale; + const bounding = selectedNode.getBounding(); const intersection = intersect( { x: elRect.x / scale, y: elRect.y / scale, width: elRect.width / scale, height: elRect.height / scale }, { x: selectedNode.pos[0] + app.canvas.ds.offset[0] - MARGIN, y: selectedNode.pos[1] + app.canvas.ds.offset[1] - LiteGraph.NODE_TITLE_HEIGHT - MARGIN, - width: selectedNode.size[0] + MARGIN + MARGIN, - height: selectedNode.size[1] + LiteGraph.NODE_TITLE_HEIGHT + MARGIN + MARGIN, + width: bounding[2] + MARGIN + MARGIN, + height: bounding[3] + MARGIN + MARGIN, } ); From 
cd4fc77d5f83867cdfb806f0c96c65ce8a84322c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 21 Nov 2023 12:54:19 -0500 Subject: [PATCH 215/420] Add taesd and taesdxl to VAELoader node. They will show up if both the taesd_encoder and taesd_decoder or taesdxl model files are present in the models/vae_approx directory. --- comfy/sd.py | 17 ++++++++++---- comfy/taesd/taesd.py | 19 +++++++++++---- latent_preview.py | 5 +--- nodes.py | 55 +++++++++++++++++++++++++++++++++++++++++--- 4 files changed, 79 insertions(+), 17 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index c3cc8e72080..0f83cc5814d 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -23,6 +23,7 @@ import comfy.lora import comfy.t2i_adapter.adapter import comfy.supported_models_base +import comfy.taesd.taesd def load_model_weights(model, sd): m, u = model.load_state_dict(sd, strict=False) @@ -154,10 +155,16 @@ def __init__(self, sd=None, device=None, config=None): if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format sd = diffusers_convert.convert_vae_state_dict(sd) + self.memory_used_encode = lambda shape: (2078 * shape[2] * shape[3]) * 1.7 #These are for AutoencoderKL and need tweaking + self.memory_used_decode = lambda shape: (2562 * shape[2] * shape[3] * 64) * 1.7 + if config is None: - #default SD1.x/SD2.x VAE parameters - ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} - self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4) + if "taesd_decoder.1.weight" in sd: + self.first_stage_model = comfy.taesd.taesd.TAESD() + else: + #default SD1.x/SD2.x VAE parameters + ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} + self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4) else: self.first_stage_model = AutoencoderKL(**(config['params'])) self.first_stage_model = self.first_stage_model.eval() @@ -206,7 +213,7 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): def decode(self, samples_in): self.first_stage_model = self.first_stage_model.to(self.device) try: - memory_used = (2562 * samples_in.shape[2] * samples_in.shape[3] * 64) * 1.7 + memory_used = self.memory_used_decode(samples_in.shape) model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) @@ -234,7 +241,7 @@ def encode(self, pixel_samples): self.first_stage_model = self.first_stage_model.to(self.device) pixel_samples = pixel_samples.movedim(-1,1) try: - memory_used = (2078 * pixel_samples.shape[2] * pixel_samples.shape[3]) * 1.7 #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change. + memory_used = self.memory_used_encode(pixel_samples.shape) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change. 
model_management.free_memory(memory_used, self.device) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) diff --git a/comfy/taesd/taesd.py b/comfy/taesd/taesd.py index 8df1f160915..46f3097a2a1 100644 --- a/comfy/taesd/taesd.py +++ b/comfy/taesd/taesd.py @@ -46,15 +46,16 @@ class TAESD(nn.Module): latent_magnitude = 3 latent_shift = 0.5 - def __init__(self, encoder_path="taesd_encoder.pth", decoder_path="taesd_decoder.pth"): + def __init__(self, encoder_path=None, decoder_path=None): """Initialize pretrained TAESD on the given device from the given checkpoints.""" super().__init__() - self.encoder = Encoder() - self.decoder = Decoder() + self.taesd_encoder = Encoder() + self.taesd_decoder = Decoder() + self.vae_scale = torch.nn.Parameter(torch.tensor(1.0)) if encoder_path is not None: - self.encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True)) + self.taesd_encoder.load_state_dict(comfy.utils.load_torch_file(encoder_path, safe_load=True)) if decoder_path is not None: - self.decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True)) + self.taesd_decoder.load_state_dict(comfy.utils.load_torch_file(decoder_path, safe_load=True)) @staticmethod def scale_latents(x): @@ -65,3 +66,11 @@ def scale_latents(x): def unscale_latents(x): """[0, 1] -> raw latents""" return x.sub(TAESD.latent_shift).mul(2 * TAESD.latent_magnitude) + + def decode(self, x): + x_sample = self.taesd_decoder(x * self.vae_scale) + x_sample = x_sample.sub(0.5).mul(2) + return x_sample + + def encode(self, x): + return self.taesd_encoder(x * 0.5 + 0.5) / self.vae_scale diff --git a/latent_preview.py b/latent_preview.py index 6e758a1a9d1..61754751efe 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -22,10 +22,7 @@ def __init__(self, taesd): self.taesd = taesd def decode_latent_to_preview(self, x0): - x_sample = self.taesd.decoder(x0[:1])[0].detach() - # x_sample = self.taesd.unscale_latents(x_sample).div(4).add(0.5) # returns value in [-2, 2] - x_sample = x_sample.sub(0.5).mul(2) - + x_sample = self.taesd.decode(x0[:1])[0].detach() x_sample = torch.clamp((x_sample + 1.0) / 2.0, min=0.0, max=1.0) x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) diff --git a/nodes.py b/nodes.py index 2adc5e07371..2de468da709 100644 --- a/nodes.py +++ b/nodes.py @@ -573,9 +573,55 @@ def load_lora(self, model, clip, lora_name, strength_model, strength_clip): return (model_lora, clip_lora) class VAELoader: + @staticmethod + def vae_list(): + vaes = folder_paths.get_filename_list("vae") + approx_vaes = folder_paths.get_filename_list("vae_approx") + sdxl_taesd_enc = False + sdxl_taesd_dec = False + sd1_taesd_enc = False + sd1_taesd_dec = False + + for v in approx_vaes: + if v.startswith("taesd_decoder."): + sd1_taesd_dec = True + elif v.startswith("taesd_encoder."): + sd1_taesd_enc = True + elif v.startswith("taesdxl_decoder."): + sdxl_taesd_dec = True + elif v.startswith("taesdxl_encoder."): + sdxl_taesd_enc = True + if sd1_taesd_dec and sd1_taesd_enc: + vaes.append("taesd") + if sdxl_taesd_dec and sdxl_taesd_enc: + vaes.append("taesdxl") + return vaes + + @staticmethod + def load_taesd(name): + sd = {} + approx_vaes = folder_paths.get_filename_list("vae_approx") + + encoder = next(filter(lambda a: a.startswith("{}_encoder.".format(name)), approx_vaes)) + decoder = next(filter(lambda a: a.startswith("{}_decoder.".format(name)), approx_vaes)) + + enc = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", encoder)) + for k in enc: + sd["taesd_encoder.{}".format(k)] = enc[k] + + dec = comfy.utils.load_torch_file(folder_paths.get_full_path("vae_approx", decoder)) + for k in dec: + sd["taesd_decoder.{}".format(k)] = dec[k] + + if name == "taesd": + sd["vae_scale"] = torch.tensor(0.18215) + elif name == "taesdxl": + sd["vae_scale"] = torch.tensor(0.13025) + return sd + @classmethod def INPUT_TYPES(s): - return {"required": { "vae_name": (folder_paths.get_filename_list("vae"), )}} + return {"required": { "vae_name": (s.vae_list(), )}} RETURN_TYPES = ("VAE",) FUNCTION = "load_vae" @@ -583,8 +629,11 @@ def INPUT_TYPES(s): #TODO: scale factor? def load_vae(self, vae_name): - vae_path = folder_paths.get_full_path("vae", vae_name) - sd = comfy.utils.load_torch_file(vae_path) + if vae_name in ["taesd", "taesdxl"]: + sd = self.load_taesd(vae_name) + else: + vae_path = folder_paths.get_full_path("vae", vae_name) + sd = comfy.utils.load_torch_file(vae_path) vae = comfy.sd.VAE(sd=sd) return (vae,) From 6a491ebe2729c675322491e255a72d5ac0ef5bf6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 21 Nov 2023 16:29:18 -0500 Subject: [PATCH 216/420] Allow model config to preprocess the vae state dict on load. 
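This mirrors the existing process_clip_state_dict / process_unet_state_dict
hooks: the base implementation returns the state dict unchanged, and the
checkpoint loader now routes the VAE weights through it before constructing
the VAE. A minimal sketch of what an override could look like (the class
name and key prefix below are hypothetical, purely for illustration):

    import comfy.utils
    import comfy.supported_models_base

    class HypotheticalModelConfig(comfy.supported_models_base.BASE):
        def process_vae_state_dict(self, state_dict):
            # remap model-specific VAE keys to the names the VAE class expects
            return comfy.utils.state_dict_prefix_replace(state_dict, {"vae_model.": ""})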
---
 comfy/sd.py                    | 1 +
 comfy/supported_models_base.py | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/comfy/sd.py b/comfy/sd.py
index 0f83cc5814d..c006a0362c7 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -448,6 +448,7 @@ class WeightsLoader(torch.nn.Module):
 
     if output_vae:
         vae_sd = comfy.utils.state_dict_prefix_replace(sd, {"first_stage_model.": ""}, filter_keys=True)
+        vae_sd = model_config.process_vae_state_dict(vae_sd)
         vae = VAE(sd=vae_sd)
 
     if output_clip:
diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py
index 6dfae034303..b073eb4fc58 100644
--- a/comfy/supported_models_base.py
+++ b/comfy/supported_models_base.py
@@ -56,6 +56,9 @@ def process_clip_state_dict(self, state_dict):
     def process_unet_state_dict(self, state_dict):
         return state_dict
 
+    def process_vae_state_dict(self, state_dict):
+        return state_dict
+
     def process_clip_state_dict_for_saving(self, state_dict):
         replace_prefix = {"": "cond_stage_model."}
         return utils.state_dict_prefix_replace(state_dict, replace_prefix)

From 72741105a687c67137eb5d7a38840b8373d82e61 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Tue, 21 Nov 2023 17:18:49 -0500
Subject: [PATCH 217/420] Remove useless code.
---
 .../modules/diffusionmodules/openaimodel.py | 31 +++++++------------
 1 file changed, 11 insertions(+), 20 deletions(-)

diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py
index 10eb68d73b5..e8f35a540fa 100644
--- a/comfy/ldm/modules/diffusionmodules/openaimodel.py
+++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py
@@ -28,25 +28,6 @@ def forward(self, x, emb):
         Apply the module to `x` given `emb` timestep embeddings.
         """
 
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
-    """
-    A sequential module that passes timestep embeddings to the children that
-    support it as an extra input.
-    """
-
-    def forward(self, x, emb, context=None, transformer_options={}, output_shape=None):
-        for layer in self:
-            if isinstance(layer, TimestepBlock):
-                x = layer(x, emb)
-            elif isinstance(layer, SpatialTransformer):
-                x = layer(x, context, transformer_options)
-            elif isinstance(layer, Upsample):
-                x = layer(x, output_shape=output_shape)
-            else:
-                x = layer(x)
-        return x
-
 #This is needed because accelerate makes a copy of transformer_options which breaks "current_index"
 def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None):
     for layer in ts:
@@ -54,13 +35,23 @@ def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, out
             x = layer(x, emb)
         elif isinstance(layer, SpatialTransformer):
             x = layer(x, context, transformer_options)
-            transformer_options["current_index"] += 1
+            if "current_index" in transformer_options:
+                transformer_options["current_index"] += 1
         elif isinstance(layer, Upsample):
             x = layer(x, output_shape=output_shape)
         else:
             x = layer(x)
     return x
 
+class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
+    """
+    A sequential module that passes timestep embeddings to the children that
+    support it as an extra input.
+    """
+
+    def forward(self, *args, **kwargs):
+        return forward_timestep_embed(self, *args, **kwargs)
+
 class Upsample(nn.Module):
     """
     An upsampling layer with an optional convolution.

From c3ae99a749fa1e9a6dbb96c69c65c6fcf2507af3 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 22 Nov 2023 03:23:16 -0500
Subject: [PATCH 218/420] Allow controlling downscale and upscale methods in PatchModelAddDownscale.
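Instead of hardcoding bicubic interpolation, the node now exposes
["bicubic", "nearest-exact", "bilinear", "area", "bislerp"] for both
directions and resizes through comfy.utils.common_upscale. Roughly what the
input block patch does (simplified sketch, not the exact node code):

    import comfy.utils

    def downscale_hidden_states(h, downscale_factor, method="bicubic"):
        width = round(h.shape[-1] * (1.0 / downscale_factor))
        height = round(h.shape[-2] * (1.0 / downscale_factor))
        # "disabled" skips common_upscale's center-crop step
        return comfy.utils.common_upscale(h, width, height, method, "disabled")

The utils.py change makes bislerp upcast to float32 internally and cast back
afterwards, so it also works when the hidden states are fp16.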
--- comfy/utils.py | 6 ++++-- comfy_extras/nodes_model_downscale.py | 10 +++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/comfy/utils.py b/comfy/utils.py index f4c0ab41928..294bbb425ff 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -318,7 +318,9 @@ def generate_bilinear_data(length_old, length_new, device): coords_2 = torch.nn.functional.interpolate(coords_2, size=(1, length_new), mode="bilinear") coords_2 = coords_2.to(torch.int64) return ratios, coords_1, coords_2 - + + orig_dtype = samples.dtype + samples = samples.float() n,c,h,w = samples.shape h_new, w_new = (height, width) @@ -347,7 +349,7 @@ def generate_bilinear_data(length_old, length_new, device): result = slerp(pass_1, pass_2, ratios) result = result.reshape(n, h_new, w_new, c).movedim(-1, 1) - return result + return result.to(orig_dtype) def lanczos(samples, width, height): images = [Image.fromarray(np.clip(255. * image.movedim(0, -1).cpu().numpy(), 0, 255).astype(np.uint8)) for image in samples] diff --git a/comfy_extras/nodes_model_downscale.py b/comfy_extras/nodes_model_downscale.py index f65ef05e18b..48bcc689273 100644 --- a/comfy_extras/nodes_model_downscale.py +++ b/comfy_extras/nodes_model_downscale.py @@ -1,6 +1,8 @@ import torch +import comfy.utils class PatchModelAddDownscale: + upscale_methods = ["bicubic", "nearest-exact", "bilinear", "area", "bislerp"] @classmethod def INPUT_TYPES(s): return {"required": { "model": ("MODEL",), @@ -9,13 +11,15 @@ def INPUT_TYPES(s): "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), "end_percent": ("FLOAT", {"default": 0.35, "min": 0.0, "max": 1.0, "step": 0.001}), "downscale_after_skip": ("BOOLEAN", {"default": True}), + "downscale_method": (s.upscale_methods,), + "upscale_method": (s.upscale_methods,), }} RETURN_TYPES = ("MODEL",) FUNCTION = "patch" CATEGORY = "_for_testing" - def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip): + def patch(self, model, block_number, downscale_factor, start_percent, end_percent, downscale_after_skip, downscale_method, upscale_method): sigma_start = model.model.model_sampling.percent_to_sigma(start_percent) sigma_end = model.model.model_sampling.percent_to_sigma(end_percent) @@ -23,12 +27,12 @@ def input_block_patch(h, transformer_options): if transformer_options["block"][1] == block_number: sigma = transformer_options["sigmas"][0].item() if sigma <= sigma_start and sigma >= sigma_end: - h = torch.nn.functional.interpolate(h, scale_factor=(1.0 / downscale_factor), mode="bicubic", align_corners=False) + h = comfy.utils.common_upscale(h, round(h.shape[-1] * (1.0 / downscale_factor)), round(h.shape[-2] * (1.0 / downscale_factor)), downscale_method, "disabled") return h def output_block_patch(h, hsp, transformer_options): if h.shape[2] != hsp.shape[2]: - h = torch.nn.functional.interpolate(h, size=(hsp.shape[2], hsp.shape[3]), mode="bicubic", align_corners=False) + h = comfy.utils.common_upscale(h, hsp.shape[-1], hsp.shape[-2], upscale_method, "disabled") return h, hsp m = model.clone() From ab7d4f784892c275e888d71aa80a3a2ed59d9b83 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 22 Nov 2023 13:53:30 +0000 Subject: [PATCH 219/420] Handle collapsing to hide element --- web/scripts/domWidget.js | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 16f4e192eea..0f8a2eb0179 100644 --- a/web/scripts/domWidget.js +++ 
b/web/scripts/domWidget.js @@ -233,6 +233,7 @@ LGraphNode.prototype.addDOMWidget = function (name, type, element, options) { } const hidden = + node.flags?.collapsed || (!!options.hideOnZoom && app.canvas.ds.scale < 0.5) || widget.computedHeight <= 0 || widget.type === "converted-widget"; @@ -290,6 +291,15 @@ LGraphNode.prototype.addDOMWidget = function (name, type, element, options) { this.addCustomWidget(widget); elementWidgets.add(this); + const collapse = this.collapse; + this.collapse = function() { + collapse.apply(this, arguments); + if(this.flags?.collapsed) { + element.hidden = true; + element.style.display = "none"; + } + } + const onRemoved = this.onRemoved; this.onRemoved = function () { element.remove(); From 70d2ea0faa28e1727f7535466ac5378e786b32cb Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Wed, 22 Nov 2023 17:52:20 +0000 Subject: [PATCH 220/420] Control filter list (#2009) * Add control_filter_list to filter items after queue * fix regex * backwards compatibility * formatting * revert * Add and fix test --- tests-ui/tests/widgetInputs.test.js | 96 ++++++++++++++++++++++++++--- web/extensions/core/widgetInputs.js | 8 ++- web/scripts/widgets.js | 56 ++++++++++++++--- 3 files changed, 141 insertions(+), 19 deletions(-) diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js index 022e5492667..e1873105acc 100644 --- a/tests-ui/tests/widgetInputs.test.js +++ b/tests-ui/tests/widgetInputs.test.js @@ -14,10 +14,10 @@ const lg = require("../utils/litegraph"); * @param { InstanceType } graph * @param { InstanceType } input * @param { string } widgetType - * @param { boolean } hasControlWidget + * @param { number } controlWidgetCount * @returns */ -async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasControlWidget) { +async function connectPrimitiveAndReload(ez, graph, input, widgetType, controlWidgetCount = 0) { // Connect to primitive and ensure its still connected after let primitive = ez.PrimitiveNode(); primitive.outputs[0].connectTo(input); @@ -33,13 +33,17 @@ async function connectPrimitiveAndReload(ez, graph, input, widgetType, hasContro expect(valueWidget.widget.type).toBe(widgetType); // Check if control_after_generate should be added - if (hasControlWidget) { + if (controlWidgetCount) { const controlWidget = primitive.widgets.control_after_generate; expect(controlWidget.widget.type).toBe("combo"); + if(widgetType === "combo") { + const filterWidget = primitive.widgets.control_filter_list; + expect(filterWidget.widget.type).toBe("string"); + } } // Ensure we dont have other widgets - expect(primitive.node.widgets).toHaveLength(1 + +!!hasControlWidget); + expect(primitive.node.widgets).toHaveLength(1 + controlWidgetCount); }); return primitive; @@ -55,8 +59,8 @@ describe("widget inputs", () => { }); [ - { name: "int", type: "INT", widget: "number", control: true }, - { name: "float", type: "FLOAT", widget: "number", control: true }, + { name: "int", type: "INT", widget: "number", control: 1 }, + { name: "float", type: "FLOAT", widget: "number", control: 1 }, { name: "text", type: "STRING" }, { name: "customtext", @@ -64,7 +68,7 @@ describe("widget inputs", () => { opt: { multiline: true }, }, { name: "toggle", type: "BOOLEAN" }, - { name: "combo", type: ["a", "b", "c"], control: true }, + { name: "combo", type: ["a", "b", "c"], control: 2 }, ].forEach((c) => { test(`widget conversion + primitive works on ${c.name}`, async () => { const { ez, graph } = await start({ 
@@ -106,7 +110,7 @@ describe("widget inputs", () => { n.widgets.ckpt_name.convertToInput(); expect(n.inputs.length).toEqual(inputCount + 1); - const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", true); + const primitive = await connectPrimitiveAndReload(ez, graph, n.inputs.ckpt_name, "combo", 2); // Disconnect & reconnect primitive.outputs[0].connections[0].disconnect(); @@ -226,7 +230,7 @@ describe("widget inputs", () => { // Reload and ensure it still only has 1 converted widget if (!assertNotNullOrUndefined(input)) return; - await connectPrimitiveAndReload(ez, graph, input, "number", true); + await connectPrimitiveAndReload(ez, graph, input, "number", 1); n = graph.find(n); expect(n.widgets).toHaveLength(1); w = n.widgets.example; @@ -258,7 +262,7 @@ describe("widget inputs", () => { // Reload and ensure it still only has 1 converted widget if (assertNotNullOrUndefined(input)) { - await connectPrimitiveAndReload(ez, graph, input, "number", true); + await connectPrimitiveAndReload(ez, graph, input, "number", 1); n = graph.find(n); expect(n.widgets).toHaveLength(1); expect(n.widgets.example.isConvertedToInput).toBeTruthy(); @@ -316,4 +320,76 @@ describe("widget inputs", () => { n1.outputs[0].connectTo(n2.inputs[0]); expect(() => n1.outputs[0].connectTo(n3.inputs[0])).toThrow(); }); + + test("combo primitive can filter list when control_after_generate called", async () => { + const { ez } = await start({ + mockNodeDefs: { + ...makeNodeDef("TestNode1", { example: [["A", "B", "C", "D", "AA", "BB", "CC", "DD", "AAA", "BBB"], {}] }), + }, + }); + + const n1 = ez.TestNode1(); + n1.widgets.example.convertToInput(); + const p = ez.PrimitiveNode() + p.outputs[0].connectTo(n1.inputs[0]); + + const value = p.widgets.value; + const control = p.widgets.control_after_generate.widget; + const filter = p.widgets.control_filter_list; + + expect(p.widgets.length).toBe(3); + control.value = "increment"; + expect(value.value).toBe("A"); + + // Manually trigger after queue when set to increment + control["afterQueued"](); + expect(value.value).toBe("B"); + + // Filter to items containing D + filter.value = "D"; + control["afterQueued"](); + expect(value.value).toBe("D"); + control["afterQueued"](); + expect(value.value).toBe("DD"); + + // Check decrement + value.value = "BBB"; + control.value = "decrement"; + filter.value = "B"; + control["afterQueued"](); + expect(value.value).toBe("BB"); + control["afterQueued"](); + expect(value.value).toBe("B"); + + // Check regex works + value.value = "BBB"; + filter.value = "/[AB]|^C$/"; + control["afterQueued"](); + expect(value.value).toBe("AAA"); + control["afterQueued"](); + expect(value.value).toBe("BB"); + control["afterQueued"](); + expect(value.value).toBe("AA"); + control["afterQueued"](); + expect(value.value).toBe("C"); + control["afterQueued"](); + expect(value.value).toBe("B"); + control["afterQueued"](); + expect(value.value).toBe("A"); + + // Check random + control.value = "randomize"; + filter.value = "/D/"; + for(let i = 0; i < 100; i++) { + control["afterQueued"](); + expect(value.value === "D" || value.value === "DD").toBeTruthy(); + } + + // Ensure it doesnt apply when fixed + control.value = "fixed"; + value.value = "B"; + filter.value = "C"; + control["afterQueued"](); + expect(value.value).toBe("B"); + }); }); diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index bad3ac3a74c..5c8fbc9b2d3 100644 --- a/web/extensions/core/widgetInputs.js +++ 
b/web/extensions/core/widgetInputs.js @@ -1,4 +1,4 @@ -import { ComfyWidgets, addValueControlWidget } from "../../scripts/widgets.js"; +import { ComfyWidgets, addValueControlWidgets } from "../../scripts/widgets.js"; import { app } from "../../scripts/app.js"; const CONVERTED_TYPE = "converted-widget"; @@ -467,7 +467,11 @@ app.registerExtension({ if (!control_value) { control_value = "fixed"; } - addValueControlWidget(this, widget, control_value); + addValueControlWidgets(this, widget, control_value); + let filter = this.widgets_values?.[2]; + if(filter && this.widgets.length === 3) { + this.widgets[2].value = filter; + } } // When our value changes, update other widgets to reflect our changes diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index ccddc0bc44b..fbc1d0fc324 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -24,17 +24,58 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { } export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) { - const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { + const widgets = addValueControlWidgets(node, targetWidget, defaultValue, values, { + addFilterList: false, + }); + return widgets[0]; +} + +export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", values, options) { + if (!options) options = {}; + + const widgets = []; + const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { values: ["fixed", "increment", "decrement", "randomize"], serialize: false, // Don't include this in prompt. }); - valueControl.afterQueued = () => { + widgets.push(valueControl); + + const isCombo = targetWidget.type === "combo"; + let comboFilter; + if (isCombo && options.addFilterList !== false) { + comboFilter = node.addWidget("string", "control_filter_list", "", function (v) {}, { + serialize: false, // Don't include this in prompt. 
+        });
+        widgets.push(comboFilter);
+    }
+    valueControl.afterQueued = () => {
         var v = valueControl.value;
-        if (targetWidget.type == "combo" && v !== "fixed") {
-            let current_index = targetWidget.options.values.indexOf(targetWidget.value);
-            let current_length = targetWidget.options.values.length;
+        if (isCombo && v !== "fixed") {
+            let values = targetWidget.options.values;
+            const filter = comboFilter?.value;
+            if (filter) {
+                let check;
+                if (filter.startsWith("/") && filter.endsWith("/")) {
+                    try {
+                        const regex = new RegExp(filter.substring(1, filter.length - 1));
+                        check = (item) => regex.test(item);
+                    } catch (error) {
+                        console.error("Error constructing RegExp filter for node " + node.id, filter, error);
+                    }
+                }
+                if (!check) {
+                    const lower = filter.toLocaleLowerCase();
+                    check = (item) => item.toLocaleLowerCase().includes(lower);
+                }
+                values = values.filter(item => check(item));
+                if (!values.length && targetWidget.options.values.length) {
+                    console.warn("Filter for node " + node.id + " has filtered out all items", filter);
+                }
+            }
+            let current_index = values.indexOf(targetWidget.value);
+            let current_length = values.length;
 
             switch (v) {
                 case "increment":
@@ -51,7 +92,7 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random
             current_index = Math.max(0, current_index);
             current_index = Math.min(current_length - 1, current_index);
             if (current_index >= 0) {
-                let value = targetWidget.options.values[current_index];
+                let value = values[current_index];
                 targetWidget.value = value;
                 targetWidget.callback(value);
             }
@@ -88,7 +129,8 @@ export function addValueControlWidget(node, targetWidget, defaultValue = "random
             targetWidget.callback(targetWidget.value);
         }
     }
-    return valueControl;
+
+    return widgets;
 };
 
 function seedWidget(node, inputName, inputData, app) {

From 32447f0c392be6a6b64fbac09fd7e7f33eb451f8 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 22 Nov 2023 17:23:37 -0500
Subject: [PATCH 221/420] Add sampling_settings so models can specify specific sampling settings.
---
 comfy/model_sampling.py        | 2 +-
 comfy/supported_models_base.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py
index 37a3ac725c6..9e2a1c1afa6 100644
--- a/comfy/model_sampling.py
+++ b/comfy/model_sampling.py
@@ -24,7 +24,7 @@ def __init__(self, model_config=None):
         super().__init__()
         beta_schedule = "linear"
         if model_config is not None:
-            beta_schedule = model_config.beta_schedule
+            beta_schedule = model_config.sampling_settings.get("beta_schedule", beta_schedule)
         self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3)
         self.sigma_data = 1.0
diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py
index b073eb4fc58..3412cfea030 100644
--- a/comfy/supported_models_base.py
+++ b/comfy/supported_models_base.py
@@ -19,7 +19,7 @@ class BASE:
     clip_prefix = []
     clip_vision_prefix = None
     noise_aug_config = None
-    beta_schedule = "linear"
+    sampling_settings = {}
     latent_format = latent_formats.LatentFormat
 
     @classmethod

From 410bf0777197c7005fe13aa4f6717d6dc63e2b22 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 22 Nov 2023 18:16:02 -0500
Subject: [PATCH 222/420] Make VAE memory estimation take dtype into account.
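The encode/decode estimates used fixed constants tuned for one element size;
they are now expressed as element counts multiplied by
model_management.dtype_size(dtype), so an fp32 VAE reserves twice the memory
of an fp16 one. Rough illustration (not part of the code) of the decode
estimate for a 64x64 latent:

    decode_fp16 = (2178 * 64 * 64 * 64) * 2  # ~1.14e9 bytes
    decode_fp32 = (2178 * 64 * 64 * 64) * 4  # ~2.28e9 bytes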
---
 comfy/sd.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/comfy/sd.py b/comfy/sd.py
index c006a0362c7..a8df3bdd449 100644
--- a/comfy/sd.py
+++ b/comfy/sd.py
@@ -155,8 +155,8 @@ def __init__(self, sd=None, device=None, config=None):
             if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                 sd = diffusers_convert.convert_vae_state_dict(sd)
 
-        self.memory_used_encode = lambda shape: (2078 * shape[2] * shape[3]) * 1.7 #These are for AutoencoderKL and need tweaking
-        self.memory_used_decode = lambda shape: (2562 * shape[2] * shape[3] * 64) * 1.7
+        self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower)
+        self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype)
 
         if config is None:
@@ -213,7 +213,7 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64):
     def decode(self, samples_in):
         self.first_stage_model = self.first_stage_model.to(self.device)
         try:
-            memory_used = self.memory_used_decode(samples_in.shape)
+            memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype)
             model_management.free_memory(memory_used, self.device)
             free_memory = model_management.get_free_memory(self.device)
             batch_number = int(free_memory / memory_used)
@@ -241,7 +241,7 @@ def encode(self, pixel_samples):
         self.first_stage_model = self.first_stage_model.to(self.device)
         pixel_samples = pixel_samples.movedim(-1,1)
         try:
-            memory_used = self.memory_used_encode(pixel_samples.shape) #NOTE: this constant along with the one in the decode above are estimated from the mem usage for the VAE and could change.
+            memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype)
             model_management.free_memory(memory_used, self.device)
             free_memory = model_management.get_free_memory(self.device)
             batch_number = int(free_memory / memory_used)

From d03d8aa2e348c6ba3333150eb18aa76f5180a7f0 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 23 Nov 2023 01:09:15 -0500
Subject: [PATCH 223/420] Fix loading groups.
---
 web/lib/litegraph.core.js | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js
index 0ca2038429e..f571edb30b8 100644
--- a/web/lib/litegraph.core.js
+++ b/web/lib/litegraph.core.js
@@ -4928,7 +4928,9 @@ LGraphNode.prototype.executeAction = function(action)
         this.title = o.title;
         this._bounding.set(o.bounding);
         this.color = o.color;
-        this.font_size = o.font_size;
+        if (o.font_size) {
+            this.font_size = o.font_size;
+        }
     };
 
     LGraphGroup.prototype.serialize = function() {

From 87031a1945278abe6b8a8058dfe6f38a5138655c Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 23 Nov 2023 11:59:11 -0500
Subject: [PATCH 224/420] Update readme with link to LCM example page.
--- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d622c907209..f87c0404f74 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin - [unCLIP Models](https://comfyanonymous.github.io/ComfyUI_examples/unclip/) - [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/) - [Model Merging](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/) +- [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) - Latent previews with [TAESD](#how-to-show-high-quality-previews) - Starts up very fast. - Works fully offline: will never download anything. From a657f96c5cd9d72725352d6b00def82d9ce5d556 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 13:55:29 -0500 Subject: [PATCH 225/420] Add a node to save animated webp. --- comfy_extras/nodes_images.py | 76 ++++++++++++++++++++++++++++++++++++ web/scripts/pnginfo.js | 4 +- 2 files changed, 79 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 8cb322327b0..18c579190e4 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -1,4 +1,12 @@ import nodes +import folder_paths +from comfy.cli_args import args + +from PIL import Image +import numpy as np +import json +import os + MAX_RESOLUTION = nodes.MAX_RESOLUTION class ImageCrop: @@ -38,7 +46,75 @@ def repeat(self, image, amount): s = image.repeat((amount, 1,1,1)) return (s,) +class SaveAnimatedWEBP: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + methods = {"default": 4, "fastest": 0, "slowest": 6} + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"}), + "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + "lossless": ("BOOLEAN", {"default": True}), + "quality": ("INT", {"default": 80, "min": 0, "max": 100}), + "method": (list(s.methods.keys()),), + # "num_frames": ("INT", {"default": 0, "min": 0, "max": 8192}), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images" + + OUTPUT_NODE = True + + CATEGORY = "_for_testing" + + def save_images(self, images, fps, filename_prefix, lossless, quality, method, num_frames=0, prompt=None, extra_pnginfo=None): + method = self.methods.get(method, "aoeu") + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + pil_images = [] + for image in images: + i = 255. 
* image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + + metadata = None + if not args.disable_metadata: + metadata = pil_images[0].getexif() + if prompt is not None: + metadata[0x0110] = "prompt:{}".format(json.dumps(prompt)) + if extra_pnginfo is not None: + inital_exif = 0x010f + for x in extra_pnginfo: + metadata[inital_exif] = "{}:{}".format(x, json.dumps(extra_pnginfo[x])) + inital_exif -= 1 + + if num_frames == 0: + num_frames = len(pil_images) + + c = len(pil_images) + for i in range(0, c, num_frames): + file = f"{filename}_{counter:05}_.webp" + pil_images[i].save(os.path.join(full_output_folder, file), save_all=True, duration=int(1000.0/fps), append_images=pil_images[i + 1:i + num_frames], exif=metadata, lossless=lossless, quality=quality, method=method) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + counter += 1 + + animated = num_frames != 1 + return { "ui": { "images": results, "animated": (animated,) } } + NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, + "SaveAnimatedWEBP": SaveAnimatedWEBP, } diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index 491caed79f5..f8cbe7a3cd9 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -50,7 +50,6 @@ export function getPngMetadata(file) { function parseExifData(exifData) { // Check for the correct TIFF header (0x4949 for little-endian or 0x4D4D for big-endian) const isLittleEndian = new Uint16Array(exifData.slice(0, 2))[0] === 0x4949; - console.log(exifData); // Function to read 16-bit and 32-bit integers from binary data function readInt(offset, isLittleEndian, length) { @@ -126,6 +125,9 @@ export function getWebpMetadata(file) { const chunk_length = dataView.getUint32(offset + 4, true); const chunk_type = String.fromCharCode(...webp.slice(offset, offset + 4)); if (chunk_type === "EXIF") { + if (String.fromCharCode(...webp.slice(offset + 8, offset + 8 + 6)) == "Exif\0\0") { + offset += 6; + } let data = parseExifData(webp.slice(offset + 8, offset + 8 + chunk_length)); for (var key in data) { var value = data[key]; From 4d2437e68165cf12989dafe1ef0a26c3a0abc7f5 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 23 Nov 2023 19:43:55 +0000 Subject: [PATCH 226/420] Call widget onRemove to remove element --- web/scripts/app.js | 1 + 1 file changed, 1 insertion(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index 601e486e6e4..180416ef971 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -544,6 +544,7 @@ export class ComfyApp { } if (widgetIdx > -1) { + this.widgets[widgetIdx].onRemove?.(); this.widgets.splice(widgetIdx, 1); } From 022033a0e75901c7c357ab96e1c804fd5da05770 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 15:06:35 -0500 Subject: [PATCH 227/420] Fix SaveAnimatedWEBP not working when metadata is disabled. 
---
 comfy_extras/nodes_images.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py
index 18c579190e4..8c6ae538711 100644
--- a/comfy_extras/nodes_images.py
+++ b/comfy_extras/nodes_images.py
@@ -75,7 +75,7 @@ def INPUT_TYPES(s):
     CATEGORY = "_for_testing"
 
     def save_images(self, images, fps, filename_prefix, lossless, quality, method, num_frames=0, prompt=None, extra_pnginfo=None):
-        method = self.methods.get(method, "aoeu")
+        method = self.methods.get(method)
         filename_prefix += self.prefix_append
         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
         results = list()
@@ -85,9 +85,8 @@ def save_images(self, images, fps, filename_prefix, lossless, quality, method, n
             img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
             pil_images.append(img)
 
-        metadata = None
+        metadata = pil_images[0].getexif()
         if not args.disable_metadata:
-            metadata = pil_images[0].getexif()
             if prompt is not None:
                 metadata[0x0110] = "prompt:{}".format(json.dumps(prompt))
             if extra_pnginfo is not None:

From 1964bf1e78dda9c6c7cf1b561068b835639aa166 Mon Sep 17 00:00:00 2001
From: Enrico Fasoli
Date: Thu, 23 Nov 2023 22:24:58 +0100
Subject: [PATCH 228/420] fix: folder handling issues
---
 folder_paths.py | 5 ++++-
 nodes.py        | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/folder_paths.py b/folder_paths.py
index 4a38deec06f..7046255e422 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -38,7 +38,10 @@
 filename_list_cache = {}
 
 if not os.path.exists(input_directory):
-    os.makedirs(input_directory)
+    try:
+        os.makedirs(input_directory)
+    except:
+        print("Failed to create input directory")
 
 def set_output_directory(output_dir):
     global output_directory
diff --git a/nodes.py b/nodes.py
index 2de468da709..27b8b1c1b80 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1808,7 +1808,7 @@ def load_custom_nodes():
     node_paths = folder_paths.get_folder_paths("custom_nodes")
     node_import_times = []
     for custom_node_path in node_paths:
-        possible_modules = os.listdir(custom_node_path)
+        possible_modules = os.listdir(os.path.realpath(custom_node_path))
         if "__pycache__" in possible_modules:
             possible_modules.remove("__pycache__")

From 871cc20e13e9ef2629e3b5faa6af64207e86d6d2 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 23 Nov 2023 19:41:33 -0500
Subject: [PATCH 229/420] Support SVD img2vid model.
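The UNet gains temporal counterparts of its building blocks: VideoResBlock
wraps a regular ResBlock plus a 3D time_stack ResBlock, and
SpatialVideoTransformer interleaves per-frame spatial attention with
attention across the time axis. Both merge the two paths through
AlphaBlender. Simplified sketch of the merge below; the "learned" strategy
(alpha from a sigmoid over a trained parameter) is approximated here by a
constant, so treat this as a sketch rather than the exact module:

    import torch

    def alpha_blend(x_spatial, x_temporal, alpha=0.5):
        # "fixed" merge strategy: blend with a constant mix factor
        return alpha * x_spatial + (1.0 - alpha) * x_temporal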
--- comfy/cldm/cldm.py | 1 + comfy/ldm/modules/attention.py | 271 ++++++++++++-- .../modules/diffusionmodules/openaimodel.py | 348 +++++++++++++++--- comfy/ldm/modules/diffusionmodules/util.py | 69 +++- comfy/ldm/modules/temporal_ae.py | 244 ++++++++++++ comfy/model_base.py | 56 ++- comfy/model_detection.py | 18 +- comfy/model_sampling.py | 46 ++- comfy/sd.py | 10 +- comfy/supported_models.py | 36 +- comfy_extras/nodes_model_advanced.py | 31 ++ 11 files changed, 1030 insertions(+), 100 deletions(-) create mode 100644 comfy/ldm/modules/temporal_ae.py diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 9a63202ab07..76a525b378a 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -54,6 +54,7 @@ def __init__( transformer_depth_output=None, device=None, operations=comfy.ops, + **kwargs, ): super().__init__() assert use_spatial_transformer == True, "use_spatial_transformer has to be true" diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 016795a5974..947e2008cbd 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -5,8 +5,10 @@ from torch import nn, einsum from einops import rearrange, repeat from typing import Optional, Any +from functools import partial -from .diffusionmodules.util import checkpoint + +from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding from .sub_quadratic_attention import efficient_dot_product_attention from comfy import model_management @@ -370,21 +372,45 @@ def forward(self, x, context=None, value=None, mask=None): class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, - disable_self_attn=False, dtype=None, device=None, operations=comfy.ops): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None, + disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=comfy.ops): super().__init__() + + self.ff_in = ff_in or inner_dim is not None + if inner_dim is None: + inner_dim = dim + + self.is_res = inner_dim == dim + + if self.ff_in: + self.norm_in = nn.LayerNorm(dim, dtype=dtype, device=device) + self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) + self.disable_self_attn = disable_self_attn - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, + self.attn1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout, context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations) # is a self-attention if not self.disable_self_attn - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device) - self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device) - self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device) + self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) + + if disable_temporal_crossattention: + if switch_temporal_ca_to_sa: + raise ValueError + else: + 
self.attn2 = None + else: + context_dim_attn2 = None + if not switch_temporal_ca_to_sa: + context_dim_attn2 = context_dim + + self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2, + heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none + self.norm2 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) + + self.norm1 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) + self.norm3 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) self.checkpoint = checkpoint self.n_heads = n_heads self.d_head = d_head + self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa def forward(self, x, context=None, transformer_options={}): return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint) @@ -418,6 +444,12 @@ def _forward(self, x, context=None, transformer_options={}): else: transformer_patches_replace = {} + if self.ff_in: + x_skip = x + x = self.ff_in(self.norm_in(x)) + if self.is_res: + x += x_skip + n = self.norm1(x) if self.disable_self_attn: context_attn1 = context @@ -465,31 +497,34 @@ def _forward(self, x, context=None, transformer_options={}): for p in patch: x = p(x, extra_options) - n = self.norm2(x) - - context_attn2 = context - value_attn2 = None - if "attn2_patch" in transformer_patches: - patch = transformer_patches["attn2_patch"] - value_attn2 = context_attn2 - for p in patch: - n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options) - - attn2_replace_patch = transformer_patches_replace.get("attn2", {}) - block_attn2 = transformer_block - if block_attn2 not in attn2_replace_patch: - block_attn2 = block - - if block_attn2 in attn2_replace_patch: - if value_attn2 is None: + if self.attn2 is not None: + n = self.norm2(x) + if self.switch_temporal_ca_to_sa: + context_attn2 = n + else: + context_attn2 = context + value_attn2 = None + if "attn2_patch" in transformer_patches: + patch = transformer_patches["attn2_patch"] value_attn2 = context_attn2 - n = self.attn2.to_q(n) - context_attn2 = self.attn2.to_k(context_attn2) - value_attn2 = self.attn2.to_v(value_attn2) - n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options) - n = self.attn2.to_out(n) - else: - n = self.attn2(n, context=context_attn2, value=value_attn2) + for p in patch: + n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options) + + attn2_replace_patch = transformer_patches_replace.get("attn2", {}) + block_attn2 = transformer_block + if block_attn2 not in attn2_replace_patch: + block_attn2 = block + + if block_attn2 in attn2_replace_patch: + if value_attn2 is None: + value_attn2 = context_attn2 + n = self.attn2.to_q(n) + context_attn2 = self.attn2.to_k(context_attn2) + value_attn2 = self.attn2.to_v(value_attn2) + n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options) + n = self.attn2.to_out(n) + else: + n = self.attn2(n, context=context_attn2, value=value_attn2) if "attn2_output_patch" in transformer_patches: patch = transformer_patches["attn2_output_patch"] @@ -497,7 +532,12 @@ def _forward(self, x, context=None, transformer_options={}): n = p(n, extra_options) x += n - x = self.ff(self.norm3(x)) + x + if self.is_res: + x_skip = x + x = self.ff(self.norm3(x)) + if self.is_res: + x += x_skip + return x @@ -565,3 +605,164 @@ def forward(self, x, context=None, transformer_options={}): x = self.proj_out(x) return x + x_in + +class SpatialVideoTransformer(SpatialTransformer): + def 
__init__( + self, + in_channels, + n_heads, + d_head, + depth=1, + dropout=0.0, + use_linear=False, + context_dim=None, + use_spatial_context=False, + timesteps=None, + merge_strategy: str = "fixed", + merge_factor: float = 0.5, + time_context_dim=None, + ff_in=False, + checkpoint=False, + time_depth=1, + disable_self_attn=False, + disable_temporal_crossattention=False, + max_time_embed_period: int = 10000, + dtype=None, device=None, operations=comfy.ops + ): + super().__init__( + in_channels, + n_heads, + d_head, + depth=depth, + dropout=dropout, + use_checkpoint=checkpoint, + context_dim=context_dim, + use_linear=use_linear, + disable_self_attn=disable_self_attn, + dtype=dtype, device=device, operations=operations + ) + self.time_depth = time_depth + self.depth = depth + self.max_time_embed_period = max_time_embed_period + + time_mix_d_head = d_head + n_time_mix_heads = n_heads + + time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads) + + inner_dim = n_heads * d_head + if use_spatial_context: + time_context_dim = context_dim + + self.time_stack = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + n_time_mix_heads, + time_mix_d_head, + dropout=dropout, + context_dim=time_context_dim, + # timesteps=timesteps, + checkpoint=checkpoint, + ff_in=ff_in, + inner_dim=time_mix_inner_dim, + disable_self_attn=disable_self_attn, + disable_temporal_crossattention=disable_temporal_crossattention, + dtype=dtype, device=device, operations=operations + ) + for _ in range(self.depth) + ] + ) + + assert len(self.time_stack) == len(self.transformer_blocks) + + self.use_spatial_context = use_spatial_context + self.in_channels = in_channels + + time_embed_dim = self.in_channels * 4 + self.time_pos_embed = nn.Sequential( + operations.Linear(self.in_channels, time_embed_dim, dtype=dtype, device=device), + nn.SiLU(), + operations.Linear(time_embed_dim, self.in_channels, dtype=dtype, device=device), + ) + + self.time_mixer = AlphaBlender( + alpha=merge_factor, merge_strategy=merge_strategy + ) + + def forward( + self, + x: torch.Tensor, + context: Optional[torch.Tensor] = None, + time_context: Optional[torch.Tensor] = None, + timesteps: Optional[int] = None, + image_only_indicator: Optional[torch.Tensor] = None, + transformer_options={} + ) -> torch.Tensor: + _, _, h, w = x.shape + x_in = x + spatial_context = None + if exists(context): + spatial_context = context + + if self.use_spatial_context: + assert ( + context.ndim == 3 + ), f"n dims of spatial context should be 3 but are {context.ndim}" + + if time_context is None: + time_context = context + time_context_first_timestep = time_context[::timesteps] + time_context = repeat( + time_context_first_timestep, "b ... -> (b n) ...", n=h * w + ) + elif time_context is not None and not self.use_spatial_context: + time_context = repeat(time_context, "b ... 
-> (b n) ...", n=h * w) + if time_context.ndim == 2: + time_context = rearrange(time_context, "b c -> b 1 c") + + x = self.norm(x) + if not self.use_linear: + x = self.proj_in(x) + x = rearrange(x, "b c h w -> b (h w) c") + if self.use_linear: + x = self.proj_in(x) + + num_frames = torch.arange(timesteps, device=x.device) + num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps) + num_frames = rearrange(num_frames, "b t -> (b t)") + t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False, max_period=self.max_time_embed_period).to(x.dtype) + emb = self.time_pos_embed(t_emb) + emb = emb[:, None, :] + + for it_, (block, mix_block) in enumerate( + zip(self.transformer_blocks, self.time_stack) + ): + transformer_options["block_index"] = it_ + x = block( + x, + context=spatial_context, + transformer_options=transformer_options, + ) + + x_mix = x + x_mix = x_mix + emb + + B, S, C = x_mix.shape + x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps) + x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options + x_mix = rearrange( + x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps + ) + + x = self.time_mixer(x_spatial=x, x_temporal=x_mix, image_only_indicator=image_only_indicator) + + if self.use_linear: + x = self.proj_out(x) + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w) + if not self.use_linear: + x = self.proj_out(x) + out = x + x_in + return out + + diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index e8f35a540fa..a497ed34478 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -5,6 +5,8 @@ import torch as th import torch.nn as nn import torch.nn.functional as F +from einops import rearrange +from functools import partial from .util import ( checkpoint, @@ -12,8 +14,9 @@ zero_module, normalization, timestep_embedding, + AlphaBlender, ) -from ..attention import SpatialTransformer +from ..attention import SpatialTransformer, SpatialVideoTransformer, default from comfy.ldm.util import exists import comfy.ops @@ -29,10 +32,15 @@ def forward(self, x, emb): """ #This is needed because accelerate makes a copy of transformer_options which breaks "current_index" -def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None): +def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None, time_context=None, num_video_frames=None, image_only_indicator=None): for layer in ts: - if isinstance(layer, TimestepBlock): + if isinstance(layer, VideoResBlock): + x = layer(x, emb, num_video_frames, image_only_indicator) + elif isinstance(layer, TimestepBlock): x = layer(x, emb) + elif isinstance(layer, SpatialVideoTransformer): + x = layer(x, context, time_context, num_video_frames, image_only_indicator, transformer_options) + transformer_options["current_index"] += 1 elif isinstance(layer, SpatialTransformer): x = layer(x, context, transformer_options) if "current_index" in transformer_options: @@ -145,6 +153,9 @@ def __init__( use_checkpoint=False, up=False, down=False, + kernel_size=3, + exchange_temb_dims=False, + skip_t_emb=False, dtype=None, device=None, operations=comfy.ops @@ -157,11 +168,17 @@ def __init__( self.use_conv = use_conv self.use_checkpoint = use_checkpoint self.use_scale_shift_norm = use_scale_shift_norm + self.exchange_temb_dims = exchange_temb_dims + + if isinstance(kernel_size, list): + padding = [k // 
2 for k in kernel_size] + else: + padding = kernel_size // 2 self.in_layers = nn.Sequential( nn.GroupNorm(32, channels, dtype=dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device), + operations.conv_nd(dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device), ) self.updown = up or down @@ -175,19 +192,24 @@ def __init__( else: self.h_upd = self.x_upd = nn.Identity() - self.emb_layers = nn.Sequential( - nn.SiLU(), - operations.Linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device - ), - ) + self.skip_t_emb = skip_t_emb + if self.skip_t_emb: + self.emb_layers = None + self.exchange_temb_dims = False + else: + self.emb_layers = nn.Sequential( + nn.SiLU(), + operations.Linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, dtype=dtype, device=device + ), + ) self.out_layers = nn.Sequential( nn.GroupNorm(32, self.out_channels, dtype=dtype, device=device), nn.SiLU(), nn.Dropout(p=dropout), zero_module( - operations.conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1, dtype=dtype, device=device) + operations.conv_nd(dims, self.out_channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device) ), ) @@ -195,7 +217,7 @@ def __init__( self.skip_connection = nn.Identity() elif use_conv: self.skip_connection = operations.conv_nd( - dims, channels, self.out_channels, 3, padding=1, dtype=dtype, device=device + dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device ) else: self.skip_connection = operations.conv_nd(dims, channels, self.out_channels, 1, dtype=dtype, device=device) @@ -221,19 +243,110 @@ def _forward(self, x, emb): h = in_conv(h) else: h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] + + emb_out = None + if not self.skip_t_emb: + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] if self.use_scale_shift_norm: out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift + h = out_norm(h) + if emb_out is not None: + scale, shift = th.chunk(emb_out, 2, dim=1) + h *= (1 + scale) + h += shift h = out_rest(h) else: - h = h + emb_out + if emb_out is not None: + if self.exchange_temb_dims: + emb_out = rearrange(emb_out, "b t c ... 
-> b c t ...") + h = h + emb_out h = self.out_layers(h) return self.skip_connection(x) + h + +class VideoResBlock(ResBlock): + def __init__( + self, + channels: int, + emb_channels: int, + dropout: float, + video_kernel_size=3, + merge_strategy: str = "fixed", + merge_factor: float = 0.5, + out_channels=None, + use_conv: bool = False, + use_scale_shift_norm: bool = False, + dims: int = 2, + use_checkpoint: bool = False, + up: bool = False, + down: bool = False, + dtype=None, + device=None, + operations=comfy.ops + ): + super().__init__( + channels, + emb_channels, + dropout, + out_channels=out_channels, + use_conv=use_conv, + use_scale_shift_norm=use_scale_shift_norm, + dims=dims, + use_checkpoint=use_checkpoint, + up=up, + down=down, + dtype=dtype, + device=device, + operations=operations + ) + + self.time_stack = ResBlock( + default(out_channels, channels), + emb_channels, + dropout=dropout, + dims=3, + out_channels=default(out_channels, channels), + use_scale_shift_norm=False, + use_conv=False, + up=False, + down=False, + kernel_size=video_kernel_size, + use_checkpoint=use_checkpoint, + exchange_temb_dims=True, + dtype=dtype, + device=device, + operations=operations + ) + self.time_mixer = AlphaBlender( + alpha=merge_factor, + merge_strategy=merge_strategy, + rearrange_pattern="b t -> b 1 t 1 1", + ) + + def forward( + self, + x: th.Tensor, + emb: th.Tensor, + num_video_frames: int, + image_only_indicator = None, + ) -> th.Tensor: + x = super().forward(x, emb) + + x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames) + x = rearrange(x, "(b t) c h w -> b c t h w", t=num_video_frames) + + x = self.time_stack( + x, rearrange(emb, "(b t) ... -> b t ...", t=num_video_frames) + ) + x = self.time_mixer( + x_spatial=x_mix, x_temporal=x, image_only_indicator=image_only_indicator + ) + x = rearrange(x, "b c t h w -> (b t) c h w") + return x + + class Timestep(nn.Module): def __init__(self, dim): super().__init__() @@ -310,6 +423,16 @@ def __init__( adm_in_channels=None, transformer_depth_middle=None, transformer_depth_output=None, + use_temporal_resblock=False, + use_temporal_attention=False, + time_context_dim=None, + extra_ff_mix_layer=False, + use_spatial_context=False, + merge_strategy=None, + merge_factor=0.0, + video_kernel_size=None, + disable_temporal_crossattention=False, + max_ddpm_temb_period=10000, device=None, operations=comfy.ops, ): @@ -364,8 +487,12 @@ def __init__( self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample + self.use_temporal_resblocks = use_temporal_resblock self.predict_codebook_ids = n_embed is not None + self.default_num_video_frames = None + self.default_image_only_indicator = None + time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( operations.Linear(model_channels, time_embed_dim, dtype=self.dtype, device=device), @@ -402,13 +529,104 @@ def __init__( input_block_chans = [model_channels] ch = model_channels ds = 1 + + def get_attention_layer( + ch, + num_heads, + dim_head, + depth=1, + context_dim=None, + use_checkpoint=False, + disable_self_attn=False, + ): + if use_temporal_attention: + return SpatialVideoTransformer( + ch, + num_heads, + dim_head, + depth=depth, + context_dim=context_dim, + time_context_dim=time_context_dim, + dropout=dropout, + ff_in=extra_ff_mix_layer, + use_spatial_context=use_spatial_context, + merge_strategy=merge_strategy, + merge_factor=merge_factor, + checkpoint=use_checkpoint, + use_linear=use_linear_in_transformer, + 
disable_self_attn=disable_self_attn, + disable_temporal_crossattention=disable_temporal_crossattention, + max_time_embed_period=max_ddpm_temb_period, + dtype=self.dtype, device=device, operations=operations + ) + else: + return SpatialTransformer( + ch, num_heads, dim_head, depth=depth, context_dim=context_dim, + disable_self_attn=disable_self_attn, use_linear=use_linear_in_transformer, + use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations + ) + + def get_resblock( + merge_factor, + merge_strategy, + video_kernel_size, + ch, + time_embed_dim, + dropout, + out_channels, + dims, + use_checkpoint, + use_scale_shift_norm, + down=False, + up=False, + dtype=None, + device=None, + operations=comfy.ops + ): + if self.use_temporal_resblocks: + return VideoResBlock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + channels=ch, + emb_channels=time_embed_dim, + dropout=dropout, + out_channels=out_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=down, + up=up, + dtype=dtype, + device=device, + operations=operations + ) + else: + return ResBlock( + channels=ch, + emb_channels=time_embed_dim, + dropout=dropout, + out_channels=out_channels, + use_checkpoint=use_checkpoint, + dims=dims, + use_scale_shift_norm=use_scale_shift_norm, + down=down, + up=up, + dtype=dtype, + device=device, + operations=operations + ) + for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, @@ -435,11 +653,9 @@ def __init__( disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: - layers.append(SpatialTransformer( + layers.append(get_attention_layer( ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations - ) + disable_self_attn=disabled_sa, use_checkpoint=use_checkpoint) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch @@ -448,10 +664,13 @@ def __init__( out_ch = ch self.input_blocks.append( TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, @@ -481,10 +700,14 @@ def __init__( #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels mid_block = [ - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, + out_channels=None, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, @@ -493,15 +716,18 @@ def __init__( operations=operations )] if transformer_depth_middle >= 0: - mid_block += [SpatialTransformer( # always uses a self-attn + mid_block += [get_attention_layer( # always uses a self-attn ch, num_heads, 
dim_head, depth=transformer_depth_middle, context_dim=context_dim, - disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations + disable_self_attn=disable_middle_self_attn, use_checkpoint=use_checkpoint ), - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, + out_channels=None, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, @@ -517,10 +743,13 @@ def __init__( for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch + ich, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, @@ -548,19 +777,21 @@ def __init__( if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( - SpatialTransformer( + get_attention_layer( ch, num_heads, dim_head, depth=num_transformers, context_dim=context_dim, - disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, - use_checkpoint=use_checkpoint, dtype=self.dtype, device=device, operations=operations + disable_self_attn=disabled_sa, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, + get_resblock( + merge_factor=merge_factor, + merge_strategy=merge_strategy, + video_kernel_size=video_kernel_size, + ch=ch, + time_embed_dim=time_embed_dim, + dropout=dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, @@ -602,6 +833,10 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo transformer_options["current_index"] = 0 transformer_patches = transformer_options.get("patches", {}) + num_video_frames = kwargs.get("num_video_frames", self.default_num_video_frames) + image_only_indicator = kwargs.get("image_only_indicator", self.default_image_only_indicator) + time_context = kwargs.get("time_context", None) + assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" @@ -616,7 +851,7 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo h = x.type(self.dtype) for id, module in enumerate(self.input_blocks): transformer_options["block"] = ("input", id) - h = forward_timestep_embed(module, h, emb, context, transformer_options) + h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) h = apply_control(h, control, 'input') if "input_block_patch" in transformer_patches: patch = transformer_patches["input_block_patch"] @@ -630,9 +865,10 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo h = p(h, transformer_options) transformer_options["block"] = ("middle", 0) - h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options) + h = forward_timestep_embed(self.middle_block, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) h = 
apply_control(h, control, 'middle') + for id, module in enumerate(self.output_blocks): transformer_options["block"] = ("output", id) hsp = hs.pop() @@ -649,7 +885,7 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo output_shape = hs[-1].shape else: output_shape = None - h = forward_timestep_embed(module, h, emb, context, transformer_options, output_shape) + h = forward_timestep_embed(module, h, emb, context, transformer_options, output_shape, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) h = h.type(x.dtype) if self.predict_codebook_ids: return self.id_predictor(h) diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index 0298ca99d4d..704bbe57450 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -13,11 +13,78 @@ import torch import torch.nn as nn import numpy as np -from einops import repeat +from einops import repeat, rearrange from comfy.ldm.util import instantiate_from_config import comfy.ops +class AlphaBlender(nn.Module): + strategies = ["learned", "fixed", "learned_with_images"] + + def __init__( + self, + alpha: float, + merge_strategy: str = "learned_with_images", + rearrange_pattern: str = "b t -> (b t) 1 1", + ): + super().__init__() + self.merge_strategy = merge_strategy + self.rearrange_pattern = rearrange_pattern + + assert ( + merge_strategy in self.strategies + ), f"merge_strategy needs to be in {self.strategies}" + + if self.merge_strategy == "fixed": + self.register_buffer("mix_factor", torch.Tensor([alpha])) + elif ( + self.merge_strategy == "learned" + or self.merge_strategy == "learned_with_images" + ): + self.register_parameter( + "mix_factor", torch.nn.Parameter(torch.Tensor([alpha])) + ) + else: + raise ValueError(f"unknown merge strategy {self.merge_strategy}") + + def get_alpha(self, image_only_indicator: torch.Tensor) -> torch.Tensor: + # skip_time_mix = rearrange(repeat(skip_time_mix, 'b -> (b t) () () ()', t=t), '(b t) 1 ... -> b 1 t ...', t=t) + if self.merge_strategy == "fixed": + # make shape compatible + # alpha = repeat(self.mix_factor, '1 -> b () t () ()', t=t, b=bs) + alpha = self.mix_factor + elif self.merge_strategy == "learned": + alpha = torch.sigmoid(self.mix_factor) + # make shape compatible + # alpha = repeat(alpha, '1 -> s () ()', s = t * bs) + elif self.merge_strategy == "learned_with_images": + assert image_only_indicator is not None, "need image_only_indicator ..." + alpha = torch.where( + image_only_indicator.bool(), + torch.ones(1, 1, device=image_only_indicator.device), + rearrange(torch.sigmoid(self.mix_factor), "... -> ... 
1"), + ) + alpha = rearrange(alpha, self.rearrange_pattern) + # make shape compatible + # alpha = repeat(alpha, '1 -> s () ()', s = t * bs) + else: + raise NotImplementedError() + return alpha + + def forward( + self, + x_spatial, + x_temporal, + image_only_indicator=None, + ) -> torch.Tensor: + alpha = self.get_alpha(image_only_indicator) + x = ( + alpha.to(x_spatial.dtype) * x_spatial + + (1.0 - alpha).to(x_spatial.dtype) * x_temporal + ) + return x + + def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if schedule == "linear": betas = ( diff --git a/comfy/ldm/modules/temporal_ae.py b/comfy/ldm/modules/temporal_ae.py new file mode 100644 index 00000000000..11ae049f3be --- /dev/null +++ b/comfy/ldm/modules/temporal_ae.py @@ -0,0 +1,244 @@ +import functools +from typing import Callable, Iterable, Union + +import torch +from einops import rearrange, repeat + +import comfy.ops + +from .diffusionmodules.model import ( + AttnBlock, + Decoder, + ResnetBlock, +) +from .diffusionmodules.openaimodel import ResBlock, timestep_embedding +from .attention import BasicTransformerBlock + +def partialclass(cls, *args, **kwargs): + class NewCls(cls): + __init__ = functools.partialmethod(cls.__init__, *args, **kwargs) + + return NewCls + + +class VideoResBlock(ResnetBlock): + def __init__( + self, + out_channels, + *args, + dropout=0.0, + video_kernel_size=3, + alpha=0.0, + merge_strategy="learned", + **kwargs, + ): + super().__init__(out_channels=out_channels, dropout=dropout, *args, **kwargs) + if video_kernel_size is None: + video_kernel_size = [3, 1, 1] + self.time_stack = ResBlock( + channels=out_channels, + emb_channels=0, + dropout=dropout, + dims=3, + use_scale_shift_norm=False, + use_conv=False, + up=False, + down=False, + kernel_size=video_kernel_size, + use_checkpoint=False, + skip_t_emb=True, + ) + + self.merge_strategy = merge_strategy + if self.merge_strategy == "fixed": + self.register_buffer("mix_factor", torch.Tensor([alpha])) + elif self.merge_strategy == "learned": + self.register_parameter( + "mix_factor", torch.nn.Parameter(torch.Tensor([alpha])) + ) + else: + raise ValueError(f"unknown merge strategy {self.merge_strategy}") + + def get_alpha(self, bs): + if self.merge_strategy == "fixed": + return self.mix_factor + elif self.merge_strategy == "learned": + return torch.sigmoid(self.mix_factor) + else: + raise NotImplementedError() + + def forward(self, x, temb, skip_video=False, timesteps=None): + b, c, h, w = x.shape + if timesteps is None: + timesteps = b + + x = super().forward(x, temb) + + if not skip_video: + x_mix = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps) + + x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps) + + x = self.time_stack(x, temb) + + alpha = self.get_alpha(bs=b // timesteps) + x = alpha * x + (1.0 - alpha) * x_mix + + x = rearrange(x, "b c t h w -> (b t) c h w") + return x + + +class AE3DConv(torch.nn.Conv2d): + def __init__(self, in_channels, out_channels, video_kernel_size=3, *args, **kwargs): + super().__init__(in_channels, out_channels, *args, **kwargs) + if isinstance(video_kernel_size, Iterable): + padding = [int(k // 2) for k in video_kernel_size] + else: + padding = int(video_kernel_size // 2) + + self.time_mix_conv = torch.nn.Conv3d( + in_channels=out_channels, + out_channels=out_channels, + kernel_size=video_kernel_size, + padding=padding, + ) + + def forward(self, input, timesteps=None, skip_video=False): + if timesteps is None: + timesteps = input.shape[0] + x = super().forward(input) + if 
skip_video: + return x + x = rearrange(x, "(b t) c h w -> b c t h w", t=timesteps) + x = self.time_mix_conv(x) + return rearrange(x, "b c t h w -> (b t) c h w") + + +class AttnVideoBlock(AttnBlock): + def __init__( + self, in_channels: int, alpha: float = 0, merge_strategy: str = "learned" + ): + super().__init__(in_channels) + # no context, single headed, as in base class + self.time_mix_block = BasicTransformerBlock( + dim=in_channels, + n_heads=1, + d_head=in_channels, + checkpoint=False, + ff_in=True, + ) + + time_embed_dim = self.in_channels * 4 + self.video_time_embed = torch.nn.Sequential( + comfy.ops.Linear(self.in_channels, time_embed_dim), + torch.nn.SiLU(), + comfy.ops.Linear(time_embed_dim, self.in_channels), + ) + + self.merge_strategy = merge_strategy + if self.merge_strategy == "fixed": + self.register_buffer("mix_factor", torch.Tensor([alpha])) + elif self.merge_strategy == "learned": + self.register_parameter( + "mix_factor", torch.nn.Parameter(torch.Tensor([alpha])) + ) + else: + raise ValueError(f"unknown merge strategy {self.merge_strategy}") + + def forward(self, x, timesteps=None, skip_time_block=False): + if skip_time_block: + return super().forward(x) + + if timesteps is None: + timesteps = x.shape[0] + + x_in = x + x = self.attention(x) + h, w = x.shape[2:] + x = rearrange(x, "b c h w -> b (h w) c") + + x_mix = x + num_frames = torch.arange(timesteps, device=x.device) + num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps) + num_frames = rearrange(num_frames, "b t -> (b t)") + t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False) + emb = self.video_time_embed(t_emb) # b, n_channels + emb = emb[:, None, :] + x_mix = x_mix + emb + + alpha = self.get_alpha() + x_mix = self.time_mix_block(x_mix, timesteps=timesteps) + x = alpha * x + (1.0 - alpha) * x_mix # alpha merge + + x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w) + x = self.proj_out(x) + + return x_in + x + + def get_alpha( + self, + ): + if self.merge_strategy == "fixed": + return self.mix_factor + elif self.merge_strategy == "learned": + return torch.sigmoid(self.mix_factor) + else: + raise NotImplementedError(f"unknown merge strategy {self.merge_strategy}") + + + +def make_time_attn( + in_channels, + attn_type="vanilla", + attn_kwargs=None, + alpha: float = 0, + merge_strategy: str = "learned", +): + return partialclass( + AttnVideoBlock, in_channels, alpha=alpha, merge_strategy=merge_strategy + ) + + +class Conv2DWrapper(torch.nn.Conv2d): + def forward(self, input: torch.Tensor, **kwargs) -> torch.Tensor: + return super().forward(input) + + +class VideoDecoder(Decoder): + available_time_modes = ["all", "conv-only", "attn-only"] + + def __init__( + self, + *args, + video_kernel_size: Union[int, list] = 3, + alpha: float = 0.0, + merge_strategy: str = "learned", + time_mode: str = "conv-only", + **kwargs, + ): + self.video_kernel_size = video_kernel_size + self.alpha = alpha + self.merge_strategy = merge_strategy + self.time_mode = time_mode + assert ( + self.time_mode in self.available_time_modes + ), f"time_mode parameter has to be in {self.available_time_modes}" + + if self.time_mode != "attn-only": + kwargs["conv_out_op"] = partialclass(AE3DConv, video_kernel_size=self.video_kernel_size) + if self.time_mode not in ["conv-only", "only-last-conv"]: + kwargs["attn_op"] = partialclass(make_time_attn, alpha=self.alpha, merge_strategy=self.merge_strategy) + if self.time_mode not in ["attn-only", "only-last-conv"]: + kwargs["resnet_op"] = partialclass(VideoResBlock, 
video_kernel_size=self.video_kernel_size, alpha=self.alpha, merge_strategy=self.merge_strategy) + + super().__init__(*args, **kwargs) + + def get_last_layer(self, skip_time_mix=False, **kwargs): + if self.time_mode == "attn-only": + raise NotImplementedError("TODO") + else: + return ( + self.conv_out.time_mix_conv.weight + if not skip_time_mix + else self.conv_out.weight + ) diff --git a/comfy/model_base.py b/comfy/model_base.py index 772e2693493..34274c4aeee 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -10,17 +10,22 @@ class ModelType(Enum): EPS = 1 V_PREDICTION = 2 + V_PREDICTION_EDM = 3 -from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete +from comfy.model_sampling import EPS, V_PREDICTION, ModelSamplingDiscrete, ModelSamplingContinuousEDM + def model_sampling(model_config, model_type): + s = ModelSamplingDiscrete + if model_type == ModelType.EPS: c = EPS elif model_type == ModelType.V_PREDICTION: c = V_PREDICTION - - s = ModelSamplingDiscrete + elif model_type == ModelType.V_PREDICTION_EDM: + c = V_PREDICTION + s = ModelSamplingContinuousEDM class ModelSampling(s, c): pass @@ -262,3 +267,48 @@ def encode_adm(self, **kwargs): out.append(self.embedder(torch.Tensor([target_width]))) flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0).repeat(clip_pooled.shape[0], 1) return torch.cat((clip_pooled.to(flat.device), flat), dim=1) + +class SVD_img2vid(BaseModel): + def __init__(self, model_config, model_type=ModelType.V_PREDICTION_EDM, device=None): + super().__init__(model_config, model_type, device=device) + self.embedder = Timestep(256) + + def encode_adm(self, **kwargs): + fps_id = kwargs.get("fps", 6) - 1 + motion_bucket_id = kwargs.get("motion_bucket_id", 127) + augmentation = kwargs.get("augmentation_level", 0) + + out = [] + out.append(self.embedder(torch.Tensor([fps_id]))) + out.append(self.embedder(torch.Tensor([motion_bucket_id]))) + out.append(self.embedder(torch.Tensor([augmentation]))) + + flat = torch.flatten(torch.cat(out)).unsqueeze(dim=0) + return flat + + def extra_conds(self, **kwargs): + out = {} + adm = self.encode_adm(**kwargs) + if adm is not None: + out['y'] = comfy.conds.CONDRegular(adm) + + latent_image = kwargs.get("concat_latent_image", None) + noise = kwargs.get("noise", None) + device = kwargs["device"] + + if latent_image is None: + latent_image = torch.zeros_like(noise) + + if latent_image.shape[1:] != noise.shape[1:]: + latent_image = utils.common_upscale(latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center") + + latent_image = utils.repeat_to_batch_size(latent_image, noise.shape[0]) + + out['c_concat'] = comfy.conds.CONDNoiseShape(latent_image) + + if "time_conditioning" in kwargs: + out["time_context"] = comfy.conds.CONDCrossAttn(kwargs["time_conditioning"]) + + out['image_only_indicator'] = comfy.conds.CONDConstant(torch.zeros((1,), device=device)) + out['num_video_frames'] = comfy.conds.CONDConstant(noise.shape[0]) + return out diff --git a/comfy/model_detection.py b/comfy/model_detection.py index d65d91e7cb5..45d603a0c63 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -24,7 +24,8 @@ def calculate_transformer_depth(prefix, state_dict_keys, state_dict): last_transformer_depth = count_blocks(state_dict_keys, transformer_prefix + '{}') context_dim = state_dict['{}0.attn2.to_k.weight'.format(transformer_prefix)].shape[1] use_linear_in_transformer = len(state_dict['{}1.proj_in.weight'.format(prefix)].shape) == 2 - return last_transformer_depth, context_dim, use_linear_in_transformer 
+ time_stack = '{}1.time_stack.0.attn1.to_q.weight'.format(prefix) in state_dict or '{}1.time_mix_blocks.0.attn1.to_q.weight'.format(prefix) in state_dict + return last_transformer_depth, context_dim, use_linear_in_transformer, time_stack return None def detect_unet_config(state_dict, key_prefix, dtype): @@ -57,6 +58,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): context_dim = None use_linear_in_transformer = False + video_model = False current_res = 1 count = 0 @@ -99,6 +101,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): if context_dim is None: context_dim = out[1] use_linear_in_transformer = out[2] + video_model = out[3] else: transformer_depth.append(0) @@ -127,6 +130,19 @@ def detect_unet_config(state_dict, key_prefix, dtype): unet_config["transformer_depth_middle"] = transformer_depth_middle unet_config['use_linear_in_transformer'] = use_linear_in_transformer unet_config["context_dim"] = context_dim + + if video_model: + unet_config["extra_ff_mix_layer"] = True + unet_config["use_spatial_context"] = True + unet_config["merge_strategy"] = "learned_with_images" + unet_config["merge_factor"] = 0.0 + unet_config["video_kernel_size"] = [3, 1, 1] + unet_config["use_temporal_resblock"] = True + unet_config["use_temporal_attention"] = True + else: + unet_config["use_temporal_resblock"] = False + unet_config["use_temporal_attention"] = False + return unet_config def model_config_from_unet_config(unet_config): diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index 9e2a1c1afa6..fac5c995e41 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -1,7 +1,7 @@ import torch import numpy as np from comfy.ldm.modules.diffusionmodules.util import make_beta_schedule - +import math class EPS: def calculate_input(self, sigma, noise): @@ -83,3 +83,47 @@ def percent_to_sigma(self, percent): percent = 1.0 - percent return self.sigma(torch.tensor(percent * 999.0)).item() + +class ModelSamplingContinuousEDM(torch.nn.Module): + def __init__(self, model_config=None): + super().__init__() + self.sigma_data = 1.0 + + if model_config is not None: + sampling_settings = model_config.sampling_settings + else: + sampling_settings = {} + + sigma_min = sampling_settings.get("sigma_min", 0.002) + sigma_max = sampling_settings.get("sigma_max", 120.0) + self.set_sigma_range(sigma_min, sigma_max) + + def set_sigma_range(self, sigma_min, sigma_max): + sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp() + + self.register_buffer('sigmas', sigmas) #for compatibility with some schedulers + self.register_buffer('log_sigmas', sigmas.log()) + + @property + def sigma_min(self): + return self.sigmas[0] + + @property + def sigma_max(self): + return self.sigmas[-1] + + def timestep(self, sigma): + return 0.25 * sigma.log() + + def sigma(self, timestep): + return (timestep / 0.25).exp() + + def percent_to_sigma(self, percent): + if percent <= 0.0: + return 999999999.9 + if percent >= 1.0: + return 0.0 + percent = 1.0 - percent + + log_sigma_min = math.log(self.sigma_min) + return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min) diff --git a/comfy/sd.py b/comfy/sd.py index a8df3bdd449..7f85540c4eb 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -159,7 +159,15 @@ def __init__(self, sd=None, device=None, config=None): self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) if config is None: - if "taesd_decoder.1.weight" in sd: + if "decoder.mid.block_1.mix_factor" in sd: + 
encoder_config = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} + decoder_config = encoder_config.copy() + decoder_config["video_kernel_size"] = [3, 1, 1] + decoder_config["alpha"] = 0.0 + self.first_stage_model = AutoencodingEngine(regularizer_config={'target': "comfy.ldm.models.autoencoder.DiagonalGaussianRegularizer"}, + encoder_config={'target': "comfy.ldm.modules.diffusionmodules.model.Encoder", 'params': encoder_config}, + decoder_config={'target': "comfy.ldm.modules.temporal_ae.VideoDecoder", 'params': decoder_config}) + elif "taesd_decoder.1.weight" in sd: self.first_stage_model = comfy.taesd.taesd.TAESD() else: #default SD1.x/SD2.x VAE parameters diff --git a/comfy/supported_models.py b/comfy/supported_models.py index fdd4ea4f5c2..7e2ac677d51 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -17,6 +17,7 @@ class SD15(supported_models_base.BASE): "model_channels": 320, "use_linear_in_transformer": False, "adm_in_channels": None, + "use_temporal_attention": False, } unet_extra_config = { @@ -56,6 +57,7 @@ class SD20(supported_models_base.BASE): "model_channels": 320, "use_linear_in_transformer": True, "adm_in_channels": None, + "use_temporal_attention": False, } latent_format = latent_formats.SD15 @@ -88,6 +90,7 @@ class SD21UnclipL(SD20): "model_channels": 320, "use_linear_in_transformer": True, "adm_in_channels": 1536, + "use_temporal_attention": False, } clip_vision_prefix = "embedder.model.visual." @@ -100,6 +103,7 @@ class SD21UnclipH(SD20): "model_channels": 320, "use_linear_in_transformer": True, "adm_in_channels": 2048, + "use_temporal_attention": False, } clip_vision_prefix = "embedder.model.visual." @@ -112,6 +116,7 @@ class SDXLRefiner(supported_models_base.BASE): "context_dim": 1280, "adm_in_channels": 2560, "transformer_depth": [0, 0, 4, 4, 4, 4, 0, 0], + "use_temporal_attention": False, } latent_format = latent_formats.SDXL @@ -148,7 +153,8 @@ class SDXL(supported_models_base.BASE): "use_linear_in_transformer": True, "transformer_depth": [0, 0, 2, 2, 10, 10], "context_dim": 2048, - "adm_in_channels": 2816 + "adm_in_channels": 2816, + "use_temporal_attention": False, } latent_format = latent_formats.SDXL @@ -203,8 +209,34 @@ class SSD1B(SDXL): "use_linear_in_transformer": True, "transformer_depth": [0, 0, 2, 2, 4, 4], "context_dim": 2048, - "adm_in_channels": 2816 + "adm_in_channels": 2816, + "use_temporal_attention": False, } +class SVD_img2vid(supported_models_base.BASE): + unet_config = { + "model_channels": 320, + "in_channels": 8, + "use_linear_in_transformer": True, + "transformer_depth": [1, 1, 1, 1, 1, 1, 0, 0], + "context_dim": 1024, + "adm_in_channels": 768, + "use_temporal_attention": True, + "use_temporal_resblock": True + } + + clip_vision_prefix = "conditioner.embedders.0.open_clip.model.visual." 
+
+    latent_format = latent_formats.SD15
+
+    sampling_settings = {"sigma_max": 700.0, "sigma_min": 0.002}
+
+    def get_model(self, state_dict, prefix="", device=None):
+        out = model_base.SVD_img2vid(self, device=device)
+        return out
+
+    def clip_target(self):
+        return None

 models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B]
+models += [SVD_img2vid]
diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py
index 0f4ddd9c340..6991c983728 100644
--- a/comfy_extras/nodes_model_advanced.py
+++ b/comfy_extras/nodes_model_advanced.py
@@ -128,6 +128,36 @@ class ModelSamplingAdvanced(sampling_base, sampling_type):
         m.add_object_patch("model_sampling", model_sampling)
         return (m, )
 
+class ModelSamplingContinuousEDM:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model": ("MODEL",),
+                              "sampling": (["v_prediction", "eps"],),
+                              "sigma_max": ("FLOAT", {"default": 120.0, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
+                              "sigma_min": ("FLOAT", {"default": 0.002, "min": 0.0, "max": 1000.0, "step":0.001, "round": False}),
+                              }}
+
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+
+    CATEGORY = "advanced/model"
+
+    def patch(self, model, sampling, sigma_max, sigma_min):
+        m = model.clone()
+
+        if sampling == "eps":
+            sampling_type = comfy.model_sampling.EPS
+        elif sampling == "v_prediction":
+            sampling_type = comfy.model_sampling.V_PREDICTION
+
+        class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousEDM, sampling_type):
+            pass
+
+        model_sampling = ModelSamplingAdvanced()
+        model_sampling.set_sigma_range(sigma_min, sigma_max)
+        m.add_object_patch("model_sampling", model_sampling)
+        return (m, )
+
 class RescaleCFG:
     @classmethod
     def INPUT_TYPES(s):
@@ -169,5 +199,6 @@ def rescale_cfg(args):
 
 NODE_CLASS_MAPPINGS = {
     "ModelSamplingDiscrete": ModelSamplingDiscrete,
+    "ModelSamplingContinuousEDM": ModelSamplingContinuousEDM,
     "RescaleCFG": RescaleCFG,
 }

From 42dfae63312f443d13841a0c4a5de467f5c354c9 Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Thu, 23 Nov 2023 19:43:09 -0500
Subject: [PATCH 230/420] Nodes to properly use the SDV img2vid checkpoint.

The img2vid model is conditioned on CLIP vision output only, which means
there is no CLIP text model; that is why I added an ImageOnlyCheckpointLoader
to load it. Note that the unCLIPCheckpointLoader can also load it because it
also has a CLIP_VISION output.

SDV_img2vid_Conditioning is the node used to pass the right conditioning to
the img2vid model.

VideoLinearCFGGuidance applies a linearly decreasing CFG scale to each video
frame, from the cfg set in the sampler node down to min_cfg.
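For intuition, the per-frame scaling it applies boils down to this small
sketch (shapes are assumed to be (frames, C, H, W), matching the node code
in the diff below):

    import torch

    def linear_cfg_sketch(cond, uncond, cond_scale, min_cfg):
        # frame 0 gets min_cfg, the last frame gets the sampler's cfg,
        # frames in between are linearly interpolated
        scale = torch.linspace(min_cfg, cond_scale, cond.shape[0],
                               device=cond.device).reshape((-1, 1, 1, 1))
        return uncond + scale * (cond - uncond)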
SDV_img2vid_Conditioning can be found in conditioning->video_models ImageOnlyCheckpointLoader can be found in loaders->video_models VideoLinearCFGGuidance can be found in sampling->video_models --- comfy_extras/nodes_video_model.py | 89 +++++++++++++++++++++++++++++++ nodes.py | 1 + 2 files changed, 90 insertions(+) create mode 100644 comfy_extras/nodes_video_model.py diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py new file mode 100644 index 00000000000..92bd883aebc --- /dev/null +++ b/comfy_extras/nodes_video_model.py @@ -0,0 +1,89 @@ +import nodes +import torch +import comfy.utils +import comfy.sd +import folder_paths + + +class ImageOnlyCheckpointLoader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "ckpt_name": (folder_paths.get_filename_list("checkpoints"), ), + }} + RETURN_TYPES = ("MODEL", "CLIP_VISION", "VAE") + FUNCTION = "load_checkpoint" + + CATEGORY = "loaders/video_models" + + def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): + ckpt_path = folder_paths.get_full_path("checkpoints", ckpt_name) + out = comfy.sd.load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=False, output_clipvision=True, embedding_directory=folder_paths.get_folder_paths("embeddings")) + return (out[0], out[3], out[2]) + + +class SDV_img2vid_Conditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 1024, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 576, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "video_frames": ("INT", {"default": 14, "min": 1, "max": 4096}), + "motion_bucket_id": ("INT", {"default": 127, "min": 1, "max": 1023}), + "fps": ("INT", {"default": 6, "min": 1, "max": 1024}), + "augmentation_level": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}) + }} + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "encode" + + CATEGORY = "conditioning/video_models" + + def encode(self, clip_vision, init_image, vae, width, height, video_frames, motion_bucket_id, fps, augmentation_level): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + if augmentation_level > 0: + encode_pixels += torch.randn_like(pixels) * augmentation_level + t = vae.encode(encode_pixels) + positive = [[pooled, {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": t}]] + negative = [[torch.zeros_like(pooled), {"motion_bucket_id": motion_bucket_id, "fps": fps, "augmentation_level": augmentation_level, "concat_latent_image": torch.zeros_like(t)}]] + latent = torch.zeros([video_frames, 4, height // 8, width // 8]) + return (positive, negative, {"samples":latent}) + +class VideoLinearCFGGuidance: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "min_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.5, "round": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "sampling/video_models" + + def patch(self, model, min_cfg): + def linear_cfg(args): + cond = args["cond"] + uncond = args["uncond"] + cond_scale = args["cond_scale"] + + scale = torch.linspace(min_cfg, 
cond_scale, cond.shape[0], device=cond.device).reshape((cond.shape[0], 1, 1, 1)) + return uncond + scale * (cond - uncond) + + m = model.clone() + m.set_model_sampler_cfg_function(linear_cfg) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader, + "SDV_img2vid_Conditioning": SDV_img2vid_Conditioning, + "VideoLinearCFGGuidance": VideoLinearCFGGuidance, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "ImageOnlyCheckpointLoader": "Image Only Checkpoint Loader (img2vid model)", +} diff --git a/nodes.py b/nodes.py index 2de468da709..bb24bc6e897 100644 --- a/nodes.py +++ b/nodes.py @@ -1850,6 +1850,7 @@ def init_custom_nodes(): "nodes_model_advanced.py", "nodes_model_downscale.py", "nodes_images.py", + "nodes_video_model.py", ] for node_file in extras_files: From 02ffbb2de3e33d9d64d38c13e70e860d9af90101 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 23 Nov 2023 23:20:07 -0500 Subject: [PATCH 231/420] Fix typo. --- comfy_extras/nodes_video_model.py | 4 ++-- web/scripts/app.js | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py index 92bd883aebc..26a717a3836 100644 --- a/comfy_extras/nodes_video_model.py +++ b/comfy_extras/nodes_video_model.py @@ -21,7 +21,7 @@ def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): return (out[0], out[3], out[2]) -class SDV_img2vid_Conditioning: +class SVD_img2vid_Conditioning: @classmethod def INPUT_TYPES(s): return {"required": { "clip_vision": ("CLIP_VISION",), @@ -80,7 +80,7 @@ def linear_cfg(args): NODE_CLASS_MAPPINGS = { "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader, - "SDV_img2vid_Conditioning": SDV_img2vid_Conditioning, + "SVD_img2vid_Conditioning": SVD_img2vid_Conditioning, "VideoLinearCFGGuidance": VideoLinearCFGGuidance, } diff --git a/web/scripts/app.js b/web/scripts/app.js index 180416ef971..cd20c40fd0a 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1523,6 +1523,7 @@ export class ComfyApp { // Patch T2IAdapterLoader to ControlNetLoader since they are the same node now if (n.type == "T2IAdapterLoader") n.type = "ControlNetLoader"; if (n.type == "ConditioningAverage ") n.type = "ConditioningAverage"; //typo fix + if (n.type == "SDV_img2vid_Conditioning") n.type = "SVD_img2vid_Conditioning"; //typo fix // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { From c782cf3ea95021b0d9fa95014b13e7c32f20fd6e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 00:27:08 -0500 Subject: [PATCH 232/420] Add to Readme that Stable Video Diffusion is supported. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f87c0404f74..9d7e317907f 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin ## Features - Nodes/graph/flowchart interface to experiment and create complex Stable Diffusion workflows without needing to code anything. -- Fully supports SD1.x, SD2.x and SDXL +- Fully supports SD1.x, SD2.x, [SDXL](https://comfyanonymous.github.io/ComfyUI_examples/sdxl/) and [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/) - Asynchronous Queue system - Many optimizations: Only re-executes the parts of the workflow that changes between executions. 
 - Command line option: ```--lowvram``` to make it work on GPUs with less than 3GB vram (enabled automatically on GPUs with low vram)

From 982338b9bb41301000ddac46d67103af9d0582cd Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 24 Nov 2023 02:08:08 -0500
Subject: [PATCH 233/420] Fix issue loading webp files in UI.

---
 web/scripts/ui.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/web/scripts/ui.js b/web/scripts/ui.js
index 6f01aa5b245..8a58d30b3a7 100644
--- a/web/scripts/ui.js
+++ b/web/scripts/ui.js
@@ -599,7 +599,7 @@ export class ComfyUI {
 		const fileInput = $el("input", {
 			id: "comfy-file-input",
 			type: "file",
-			accept: ".json,image/png,.latent,.safetensors",
+			accept: ".json,image/png,.latent,.safetensors,image/webp",
 			style: {display: "none"},
 			parent: document.body,
 			onchange: () => {

From 3e5ea74ad356e849ea27f1d766a7b6d90a5acfda Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 24 Nov 2023 03:55:35 -0500
Subject: [PATCH 234/420] Make buggy xformers fall back on pytorch attention.

---
 comfy/ldm/modules/attention.py | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py
index 947e2008cbd..d511dda16e8 100644
--- a/comfy/ldm/modules/attention.py
+++ b/comfy/ldm/modules/attention.py
@@ -278,9 +278,20 @@ def attention_split(q, k, v, heads, mask=None):
     )
     return r1
 
+BROKEN_XFORMERS = False
+try:
+    x_vers = xformers.__version__
+    #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
+    BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
+except:
+    pass
+
 def attention_xformers(q, k, v, heads, mask=None):
     b, _, dim_head = q.shape
     dim_head //= heads
+    if BROKEN_XFORMERS:
+        if b * heads > 65535:
+            return attention_pytorch(q, k, v, heads, mask)
 
     q, k, v = map(
         lambda t: t.unsqueeze(3)

From eff24ea6aa4f53870f575ec34371b7db940c1cfc Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Fri, 24 Nov 2023 11:12:10 -0500
Subject: [PATCH 235/420] Add a node to save animated PNG files.

These work in ffmpeg unlike webp.
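The node relies on Pillow's APNG writer (available since Pillow 8.0); a
minimal standalone sketch of that call, where the frame contents, file
name and fps are placeholders:

    from PIL import Image

    # three dummy frames; the node uses the rendered images instead
    frames = [Image.new("RGB", (64, 64), c) for c in ("red", "green", "blue")]
    fps = 6.0
    frames[0].save("animation.png", save_all=True, append_images=frames[1:],
                   duration=int(1000.0 / fps), compress_level=4)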
--- comfy_extras/nodes_images.py | 56 ++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 8c6ae538711..450c8dc40dd 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -3,6 +3,8 @@ from comfy.cli_args import args from PIL import Image +from PIL.PngImagePlugin import PngInfo + import numpy as np import json import os @@ -112,8 +114,62 @@ def save_images(self, images, fps, filename_prefix, lossless, quality, method, n animated = num_frames != 1 return { "ui": { "images": results, "animated": (animated,) } } +class SaveAnimatedPNG: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"}), + "fps": ("FLOAT", {"default": 12.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + "compress_level": ("INT", {"default": 4, "min": 0, "max": 9}) + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images" + + OUTPUT_NODE = True + + CATEGORY = "_for_testing" + + def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + pil_images = [] + for image in images: + i = 255. * image.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + pil_images.append(img) + + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + file = f"{filename}_{counter:05}_.png" + pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:]) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + return { "ui": { "images": results, "animated": (True,)} } + NODE_CLASS_MAPPINGS = { "ImageCrop": ImageCrop, "RepeatImageBatch": RepeatImageBatch, "SaveAnimatedWEBP": SaveAnimatedWEBP, + "SaveAnimatedPNG": SaveAnimatedPNG, } From 916e9c998c5952a30e7795ccfda74186a82a2a06 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 11:19:23 -0500 Subject: [PATCH 236/420] Use same default fps as webp node. 
--- comfy_extras/nodes_images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 450c8dc40dd..4c86b2df651 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -125,7 +125,7 @@ def INPUT_TYPES(s): return {"required": {"images": ("IMAGE", ), "filename_prefix": ("STRING", {"default": "ComfyUI"}), - "fps": ("FLOAT", {"default": 12.0, "min": 0.01, "max": 1000.0, "step": 0.01}), + "fps": ("FLOAT", {"default": 6.0, "min": 0.01, "max": 1000.0, "step": 0.01}), "compress_level": ("INT", {"default": 4, "min": 0, "max": 9}) }, "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, From 8ad5d494d52883e02f5745603dfd06f1a49c040b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 18:14:17 -0500 Subject: [PATCH 237/420] Fix APNG not working in ffmpeg. --- comfy_extras/nodes_images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 4c86b2df651..4b6cd3d1b7f 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -152,10 +152,10 @@ def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", pr if not args.disable_metadata: metadata = PngInfo() if prompt is not None: - metadata.add_text("prompt", json.dumps(prompt)) + metadata.add(b"tEXt", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True) if extra_pnginfo is not None: for x in extra_pnginfo: - metadata.add_text(x, json.dumps(extra_pnginfo[x])) + metadata.add(b"tEXt", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True) file = f"{filename}_{counter:05}_.png" pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:]) From e020ab61f97fd8bccc31e7eebd23acd5dd9e2ecd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 18:24:19 -0500 Subject: [PATCH 238/420] Fix output APNG not working with ffmpeg. 
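The metadata now goes into private "comf" chunks instead of tEXt ones.
Since PNG chunks follow a fixed layout (4-byte big-endian length, 4-byte
type, payload, CRC), they can be read back with a short walker; a hedged
sketch mirroring the pnginfo.js logic below (path handling and validation
omitted):

    import json
    import struct

    def read_comf_chunks(path):
        out = {}
        with open(path, "rb") as f:
            data = f.read()
        offset = 8  # skip the PNG signature
        while offset < len(data):
            length = struct.unpack(">I", data[offset:offset + 4])[0]
            ctype = data[offset + 4:offset + 8]
            if ctype == b"comf":
                payload = data[offset + 8:offset + 8 + length]
                key, _, value = payload.partition(b"\0")
                out[key.decode("latin-1")] = json.loads(value)
            offset += 12 + length  # length + type + payload + crc
        return out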
--- comfy_extras/nodes_images.py | 4 ++-- web/scripts/pnginfo.js | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 4b6cd3d1b7f..5ad2235a523 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -152,10 +152,10 @@ def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", pr if not args.disable_metadata: metadata = PngInfo() if prompt is not None: - metadata.add(b"tEXt", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True) + metadata.add(b"comf", "prompt".encode("latin-1", "strict") + b"\0" + json.dumps(prompt).encode("latin-1", "strict"), after_idat=True) if extra_pnginfo is not None: for x in extra_pnginfo: - metadata.add(b"tEXt", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True) + metadata.add(b"comf", x.encode("latin-1", "strict") + b"\0" + json.dumps(extra_pnginfo[x]).encode("latin-1", "strict"), after_idat=True) file = f"{filename}_{counter:05}_.png" pil_images[0].save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=compress_level, save_all=True, duration=int(1000.0/fps), append_images=pil_images[1:]) diff --git a/web/scripts/pnginfo.js b/web/scripts/pnginfo.js index f8cbe7a3cd9..83a4ebc86c4 100644 --- a/web/scripts/pnginfo.js +++ b/web/scripts/pnginfo.js @@ -24,7 +24,7 @@ export function getPngMetadata(file) { const length = dataView.getUint32(offset); // Get the chunk type const type = String.fromCharCode(...pngData.slice(offset + 4, offset + 8)); - if (type === "tEXt") { + if (type === "tEXt" || type == "comf") { // Get the keyword let keyword_end = offset + 8; while (pngData[keyword_end] !== 0) { From 5d6dfce5481f67bcfb30b1b39ad6eb78022653af Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 24 Nov 2023 20:35:29 -0500 Subject: [PATCH 239/420] Fix importing diffusers unets. 
--- comfy/model_detection.py | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index 45d603a0c63..c682c3e1a18 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -232,52 +232,62 @@ def unet_config_from_diffusers_unet(state_dict, dtype): SDXL = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, - 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_refiner = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2560, 'dtype': dtype, 'in_channels': 4, 'model_channels': 384, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [0, 0, 4, 4, 4, 4, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 4, - 'use_linear_in_transformer': True, 'context_dim': 1280, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0]} + 'use_linear_in_transformer': True, 'context_dim': 1280, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 4, 4, 4, 4, 4, 4, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD21 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': True, - 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD21_uncliph = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2048, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, - 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD21_unclipl = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 1536, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 
'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, - 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'use_linear_in_transformer': True, 'context_dim': 1024, 'num_head_channels': 64, 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SD15 = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'adm_in_channels': None, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2, 2], 'transformer_depth': [1, 1, 1, 1, 1, 1, 0, 0], 'channel_mult': [1, 2, 4, 4], 'transformer_depth_middle': 1, 'use_linear_in_transformer': False, 'context_dim': 768, 'num_heads': 8, - 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]} + 'transformer_depth_output': [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_mid_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 1, 1], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 1, - 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 1, 1, 1]} + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 1, 1, 1], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_small_cnet = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 0, 0, 0, 0], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 0, - 'use_linear_in_transformer': True, 'num_head_channels': 64, 'context_dim': 1, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 0, 0, 0]} + 'use_linear_in_transformer': True, 'num_head_channels': 64, 'context_dim': 1, 'transformer_depth_output': [0, 0, 0, 0, 0, 0, 0, 0, 0], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SDXL_diffusers_inpaint = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 9, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 10, 10], 'channel_mult': [1, 2, 4], 'transformer_depth_middle': 10, - 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10]} + 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, 'transformer_depth_output': [0, 0, 0, 2, 2, 2, 10, 10, 10], + 'use_temporal_attention': False, 'use_temporal_resblock': False} SSD_1B = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False, 'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320, 'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 2, 2, 4, 4], 'transformer_depth_output': [0, 0, 0, 1, 1, 2, 
10, 4, 4], - 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64} + 'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64, + 'use_temporal_attention': False, 'use_temporal_resblock': False} supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B] From 5b37270d3ad2227a30e15101a8d528ca77bd589d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 25 Nov 2023 02:26:50 -0500 Subject: [PATCH 240/420] Add a lora loader node for models with no CLIP. --- nodes.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/nodes.py b/nodes.py index bb24bc6e897..df40f809456 100644 --- a/nodes.py +++ b/nodes.py @@ -572,6 +572,19 @@ def load_lora(self, model, clip, lora_name, strength_model, strength_clip): model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip) return (model_lora, clip_lora) +class LoraLoaderModelOnly(LoraLoader): + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "lora_name": (folder_paths.get_filename_list("loras"), ), + "strength_model": ("FLOAT", {"default": 1.0, "min": -20.0, "max": 20.0, "step": 0.01}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "load_lora_model_only" + + def load_lora_model_only(self, model, lora_name, strength_model): + return (self.load_lora(model, None, lora_name, strength_model, 0)[0],) + class VAELoader: @staticmethod def vae_list(): @@ -1703,6 +1716,7 @@ def expand_image(self, image, left, top, right, bottom, feathering): "ConditioningZeroOut": ConditioningZeroOut, "ConditioningSetTimestepRange": ConditioningSetTimestepRange, + "LoraLoaderModelOnly": LoraLoaderModelOnly, } NODE_DISPLAY_NAME_MAPPINGS = { From 50dc39d6ec5420f35b81f965c106b6710ff48e6e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 26 Nov 2023 03:13:56 -0500 Subject: [PATCH 241/420] Clean up the extra_options dict for the transformer patches. Now everything in transformer_options gets put in extra_options. 
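In effect the new loop behaves like this small illustration (the keys are
examples taken from the surrounding code, not an exhaustive list):

    transformer_options = {"block": ("input", 3), "block_index": 1,
                           "cond_or_uncond": [0], "patches": {},
                           "patches_replace": {}}
    # everything except the two patch dicts flows into extra_options
    extra_options = {k: v for k, v in transformer_options.items()
                     if k not in ("patches", "patches_replace")}
    assert "patches" not in extra_options
    assert extra_options["block"] == ("input", 3)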
--- comfy/ldm/modules/attention.py | 31 ++++++------------- .../modules/diffusionmodules/openaimodel.py | 11 ++++--- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index d511dda16e8..7dc1a1b5ceb 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -430,31 +430,20 @@ def _forward(self, x, context=None, transformer_options={}): extra_options = {} block = None block_index = 0 - if "current_index" in transformer_options: - extra_options["transformer_index"] = transformer_options["current_index"] - if "block_index" in transformer_options: - block_index = transformer_options["block_index"] - extra_options["block_index"] = block_index - if "original_shape" in transformer_options: - extra_options["original_shape"] = transformer_options["original_shape"] - if "block" in transformer_options: - block = transformer_options["block"] - extra_options["block"] = block - if "cond_or_uncond" in transformer_options: - extra_options["cond_or_uncond"] = transformer_options["cond_or_uncond"] - if "patches" in transformer_options: - transformer_patches = transformer_options["patches"] - else: - transformer_patches = {} + transformer_patches = {} + transformer_patches_replace = {} + + for k in transformer_options: + if k == "patches": + transformer_patches = transformer_options[k] + elif k == "patches_replace": + transformer_patches_replace = transformer_options[k] + else: + extra_options[k] = transformer_options[k] extra_options["n_heads"] = self.n_heads extra_options["dim_head"] = self.d_head - if "patches_replace" in transformer_options: - transformer_patches_replace = transformer_options["patches_replace"] - else: - transformer_patches_replace = {} - if self.ff_in: x_skip = x x = self.ff_in(self.norm_in(x)) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index a497ed34478..48264892c26 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -31,7 +31,7 @@ def forward(self, x, emb): Apply the module to `x` given `emb` timestep embeddings. """ -#This is needed because accelerate makes a copy of transformer_options which breaks "current_index" +#This is needed because accelerate makes a copy of transformer_options which breaks "transformer_index" def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, output_shape=None, time_context=None, num_video_frames=None, image_only_indicator=None): for layer in ts: if isinstance(layer, VideoResBlock): @@ -40,11 +40,12 @@ def forward_timestep_embed(ts, x, emb, context=None, transformer_options={}, out x = layer(x, emb) elif isinstance(layer, SpatialVideoTransformer): x = layer(x, context, time_context, num_video_frames, image_only_indicator, transformer_options) - transformer_options["current_index"] += 1 + if "transformer_index" in transformer_options: + transformer_options["transformer_index"] += 1 elif isinstance(layer, SpatialTransformer): x = layer(x, context, transformer_options) - if "current_index" in transformer_options: - transformer_options["current_index"] += 1 + if "transformer_index" in transformer_options: + transformer_options["transformer_index"] += 1 elif isinstance(layer, Upsample): x = layer(x, output_shape=output_shape) else: @@ -830,7 +831,7 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo :return: an [N x C x ...] Tensor of outputs. 
""" transformer_options["original_shape"] = list(x.shape) - transformer_options["current_index"] = 0 + transformer_options["transformer_index"] = 0 transformer_patches = transformer_options.get("patches", {}) num_video_frames = kwargs.get("num_video_frames", self.default_num_video_frames) From 39e75862b248a20e8233ccee743ba5b2e977cdcf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 26 Nov 2023 03:43:02 -0500 Subject: [PATCH 242/420] Fix regression from last commit. --- comfy/ldm/modules/attention.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 7dc1a1b5ceb..f684523823d 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -428,8 +428,8 @@ def forward(self, x, context=None, transformer_options={}): def _forward(self, x, context=None, transformer_options={}): extra_options = {} - block = None - block_index = 0 + block = transformer_options.get("block", None) + block_index = transformer_options.get("block_index", 0) transformer_patches = {} transformer_patches_replace = {} From 6aa1bcd601dfdcb4485ea31947ffbf992a5b54fc Mon Sep 17 00:00:00 2001 From: Jack Bauer <2308123+dmx974@users.noreply.github.com> Date: Sun, 26 Nov 2023 17:23:11 +0400 Subject: [PATCH 243/420] Remove hard coded max_items in history API --- web/scripts/api.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/scripts/api.js b/web/scripts/api.js index de56b23108b..9aa7528af04 100644 --- a/web/scripts/api.js +++ b/web/scripts/api.js @@ -254,9 +254,9 @@ class ComfyApi extends EventTarget { * Gets the prompt execution history * @returns Prompt history including node outputs */ - async getHistory() { + async getHistory(max_items=200) { try { - const res = await this.fetchApi("/history?max_items=200"); + const res = await this.fetchApi(`/history?max_items=${max_items}`); return { History: Object.values(await res.json()) }; } catch (error) { console.error(error); From edd6f75d3ad243e6c2d38f2d94191da40d12b2f3 Mon Sep 17 00:00:00 2001 From: David Jeske Date: Sun, 26 Nov 2023 13:10:31 -0700 Subject: [PATCH 244/420] better error for invalid output paths --- folder_paths.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/folder_paths.py b/folder_paths.py index 4a38deec06f..5479fd7b2b1 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -228,8 +228,12 @@ def compute_vars(input, image_width, image_height): full_output_folder = os.path.join(output_dir, subfolder) if os.path.commonpath((output_dir, os.path.abspath(full_output_folder))) != output_dir: - print("Saving image outside the output folder is not allowed.") - return {} + err = "**** ERROR: Saving image outside the output folder is not allowed." 
+ \ + "\n full_output_folder: " + os.path.abspath(full_output_folder) + \ + "\n output_dir: " + output_dir + \ + "\n commonpath: " + os.path.commonpath((output_dir, os.path.abspath(full_output_folder))) + print(err) + raise Exception(err) try: counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1 From 34eccd863bb41f48346de178a55be308dc36e5e5 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 27 Nov 2023 14:00:15 +0000 Subject: [PATCH 245/420] Add simple undo redo history --- web/extensions/core/undoRedo.js | 150 ++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 web/extensions/core/undoRedo.js diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js new file mode 100644 index 00000000000..1c1d785a8e9 --- /dev/null +++ b/web/extensions/core/undoRedo.js @@ -0,0 +1,150 @@ +import { app } from "../../scripts/app.js"; + +const MAX_HISTORY = 50; + +let undo = []; +let redo = []; +let activeState = null; +let isOurLoad = false; +function checkState() { + const currentState = app.graph.serialize(); + if (!graphEqual(activeState, currentState)) { + undo.push(activeState); + if(undo.length > MAX_HISTORY) { + undo.shift(); + } + activeState = clone(currentState); + redo.length = 0; + } +} + +const loadGraphData = app.loadGraphData; +app.loadGraphData = async function () { + const v = await loadGraphData.apply(this, arguments); + if (isOurLoad) { + isOurLoad = false; + } else { + checkState(); + } + return v; +}; + +function clone(obj) { + try { + if (typeof structuredClone !== "undefined") { + return structuredClone(obj); + } + } catch (error) { + // structuredClone is stricter than using JSON.parse/stringify so fallback to that + } + + return JSON.parse(JSON.stringify(obj)); +} + +function graphEqual(a, b, root = true) { + if (a === b) return true; + + if (typeof a == "object" && a && typeof b == "object" && b) { + const keys = Object.getOwnPropertyNames(a); + + if (keys.length != Object.getOwnPropertyNames(b).length) { + return false; + } + + for (const key of keys) { + let av = a[key]; + let bv = b[key]; + if (root && key === "nodes") { + // Nodes need to be sorted as the order changes when selecting nodes + av = [...av].sort((a, b) => a.id - b.id); + bv = [...bv].sort((a, b) => a.id - b.id); + } + if (!graphEqual(av, bv, false)) { + return false; + } + } + + return true; + } + + return false; +} + +const undoRedo = async (e) => { + if (e.ctrlKey || e.metaKey) { + if (e.key === "y") { + const prevState = redo.pop(); + if (prevState) { + undo.push(activeState); + isOurLoad = true; + await app.loadGraphData(prevState); + activeState = prevState; + } + return true; + } else if (e.key === "z") { + const prevState = undo.pop(); + if (prevState) { + redo.push(activeState); + isOurLoad = true; + await app.loadGraphData(prevState); + activeState = prevState; + } + return true; + } + } +}; + +const bindInput = (activeEl) => { + if (activeEl?.tagName !== "CANVAS" && activeEl?.tagName !== "BODY") { + for (const evt of ["change", "input", "blur"]) { + if (`on${evt}` in activeEl) { + const listener = () => { + checkState(); + activeEl.removeEventListener(evt, listener); + }; + activeEl.addEventListener(evt, listener); + return true; + } + } + } +}; + +window.addEventListener( + "keydown", + (e) => { + requestAnimationFrame(async () => { + const activeEl = document.activeElement; + if (activeEl?.tagName === "INPUT" || 
activeEl?.type === "textarea") { + // Ignore events on inputs, they have their native history + return; + } + + // Check if this is a ctrl+z ctrl+y + if (await undoRedo(e)) return; + + // If our active element is some type of input then handle changes after they're done + if (bindInput(activeEl)) return; + checkState(); + }); + }, + true +); + +// Handle clicking DOM elements (e.g. widgets) +window.addEventListener("mouseup", () => { + checkState(); +}); + +// Handle litegraph clicks +const processMouseUp = LGraphCanvas.prototype.processMouseUp; +LGraphCanvas.prototype.processMouseUp = function (e) { + const v = processMouseUp.apply(this, arguments); + checkState(); + return v; +}; +const processMouseDown = LGraphCanvas.prototype.processMouseDown; +LGraphCanvas.prototype.processMouseDown = function (e) { + const v = processMouseDown.apply(this, arguments); + checkState(); + return v; +}; From 9be0b30cf1f69384e72823f5112072b15f1f431d Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 27 Nov 2023 14:02:50 +0000 Subject: [PATCH 246/420] fix formatting --- web/extensions/core/undoRedo.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js index 1c1d785a8e9..c6613b0f02d 100644 --- a/web/extensions/core/undoRedo.js +++ b/web/extensions/core/undoRedo.js @@ -10,9 +10,9 @@ function checkState() { const currentState = app.graph.serialize(); if (!graphEqual(activeState, currentState)) { undo.push(activeState); - if(undo.length > MAX_HISTORY) { - undo.shift(); - } + if (undo.length > MAX_HISTORY) { + undo.shift(); + } activeState = clone(currentState); redo.length = 0; } From be71bb5e13d716c541a5372a518e9d512073fe18 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 14:04:16 -0500 Subject: [PATCH 247/420] Tweak memory inference calculations a bit. --- comfy/model_base.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 34274c4aeee..3d6879ae631 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -164,12 +164,13 @@ def set_inpaint(self): self.inpaint_model = True def memory_required(self, input_shape): - area = input_shape[0] * input_shape[2] * input_shape[3] if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): #TODO: this needs to be tweaked - return (area / (comfy.model_management.dtype_size(self.get_dtype()) * 10)) * (1024 * 1024) + area = max(input_shape[0], 3) * input_shape[2] * input_shape[3] + return (area * comfy.model_management.dtype_size(self.get_dtype()) / 60) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. + area = input_shape[0] * input_shape[2] * input_shape[3] return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024) From 13fdee6abf7a7b072ad0f1ebbaa76aca13ddd2a8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 14:55:40 -0500 Subject: [PATCH 248/420] Try to free memory for both cond+uncond before inference. 
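The one-line change in comfy/sample.py below is easy to misread, so here is the sizing logic as a standalone sketch. memory_required here is a simplified stand-in for BaseModel.memory_required using the flash-attention formula from the previous commit, and the shape is made up.

def memory_required(input_shape, dtype_size=2):
    # Simplified stand-in: the estimate scales with batch * height * width.
    batch, _, height, width = input_shape
    return (batch * height * width * dtype_size / 50) * (1024 * 1024)

noise_shape = (4, 4, 64, 64)  # hypothetical latent batch
# Before: memory was estimated for the latent batch as-is.
before = memory_required(noise_shape)
# After: the batch dimension is doubled so the cond and uncond passes are
# both budgeted for when deciding which models can stay loaded.
after = memory_required([noise_shape[0] * 2] + list(noise_shape[1:]))
assert after == 2 * before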
--- comfy/model_base.py | 4 ++-- comfy/sample.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 3d6879ae631..786c9cf47ba 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -166,8 +166,8 @@ def set_inpaint(self): def memory_required(self, input_shape): if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): #TODO: this needs to be tweaked - area = max(input_shape[0], 3) * input_shape[2] * input_shape[3] - return (area * comfy.model_management.dtype_size(self.get_dtype()) / 60) * (1024 * 1024) + area = input_shape[0] * input_shape[2] * input_shape[3] + return (area * comfy.model_management.dtype_size(self.get_dtype()) / 50) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. area = input_shape[0] * input_shape[2] * input_shape[3] diff --git a/comfy/sample.py b/comfy/sample.py index 4bfdb8ce55d..034db97ee88 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -83,7 +83,7 @@ def prepare_sampling(model, noise_shape, positive, negative, noise_mask): real_model = None models, inference_memory = get_additional_models(positive, negative, model.model_dtype()) - comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise_shape) + inference_memory) + comfy.model_management.load_models_gpu([model] + models, model.memory_required([noise_shape[0] * 2] + list(noise_shape[1:])) + inference_memory) real_model = model.model return real_model, positive, negative, noise_mask, models From 488de0b4df524589c11a9bd0e2b3663d03003342 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 16:32:03 -0500 Subject: [PATCH 249/420] ModelSamplingDiscreteLCM -> ModelSamplingDiscreteDistilled --- comfy_extras/nodes_model_advanced.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 6991c983728..20261aadea6 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -17,7 +17,9 @@ def calculate_denoised(self, sigma, model_output, model_input): return c_out * x0 + c_skip * model_input -class ModelSamplingDiscreteLCM(torch.nn.Module): +class ModelSamplingDiscreteDistilled(torch.nn.Module): + original_timesteps = 50 + def __init__(self): super().__init__() self.sigma_data = 1.0 @@ -29,13 +31,12 @@ def __init__(self): alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) - original_timesteps = 50 - self.skip_steps = timesteps // original_timesteps + self.skip_steps = timesteps // self.original_timesteps - alphas_cumprod_valid = torch.zeros((original_timesteps), dtype=torch.float32) - for x in range(original_timesteps): - alphas_cumprod_valid[original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps] + alphas_cumprod_valid = torch.zeros((self.original_timesteps), dtype=torch.float32) + for x in range(self.original_timesteps): + alphas_cumprod_valid[self.original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps] sigmas = ((1 - alphas_cumprod_valid) / alphas_cumprod_valid) ** 0.5 self.set_sigmas(sigmas) @@ -116,7 +117,7 @@ def patch(self, model, sampling, zsnr): sampling_type = comfy.model_sampling.V_PREDICTION elif sampling == "lcm": sampling_type = LCM - sampling_base = ModelSamplingDiscreteLCM + sampling_base = ModelSamplingDiscreteDistilled class 
ModelSamplingAdvanced(sampling_base, sampling_type): pass From f30b992b18078415f7c31c6c2f5ad1513db0bf5e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 16:41:33 -0500 Subject: [PATCH 250/420] .sigma and .timestep now return tensors on the same device as the input. --- comfy/model_sampling.py | 6 +++--- comfy_extras/nodes_model_advanced.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index fac5c995e41..69c8b1f01fc 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -65,15 +65,15 @@ def sigma_max(self): def timestep(self, sigma): log_sigma = sigma.log() dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) + return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device) def sigma(self, timestep): - t = torch.clamp(timestep.float(), min=0, max=(len(self.sigmas) - 1)) + t = torch.clamp(timestep.float().to(self.log_sigmas.device), min=0, max=(len(self.sigmas) - 1)) low_idx = t.floor().long() high_idx = t.ceil().long() w = t.frac() log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() + return log_sigma.exp().to(timestep.device) def percent_to_sigma(self, percent): if percent <= 0.0: diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 20261aadea6..efcdf1932e4 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -56,15 +56,15 @@ def sigma_max(self): def timestep(self, sigma): log_sigma = sigma.log() dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None] - return dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1) + return (dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1)).to(sigma.device) def sigma(self, timestep): - t = torch.clamp(((timestep - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1)) + t = torch.clamp(((timestep.float().to(self.log_sigmas.device) - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1)) low_idx = t.floor().long() high_idx = t.ceil().long() w = t.frac() log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] - return log_sigma.exp() + return log_sigma.exp().to(timestep.device) def percent_to_sigma(self, percent): if percent <= 0.0: From c45d1b9b67a98c9ff9743b93caf8303286a430c3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 27 Nov 2023 17:32:07 -0500 Subject: [PATCH 251/420] Add a function to load a unet from a state dict. 
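In usage terms, the split means callers that already hold a state dict in memory (for example the output of a model merge) no longer have to round-trip through a file. A sketch of both entry points, assuming a ComfyUI checkout on the import path; "my_unet.safetensors" is a placeholder path.

import comfy.sd
import comfy.utils

# Path-based entry point, unchanged behaviour: raises on unknown model types.
model = comfy.sd.load_unet("my_unet.safetensors")

# New entry point: interpret an in-memory state dict directly. It returns
# None instead of raising when the model type cannot be detected.
sd = comfy.utils.load_torch_file("my_unet.safetensors")
model = comfy.sd.load_unet_state_dict(sd)
if model is None:
    print("unsupported unet state dict")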
--- comfy/sd.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 7f85540c4eb..53c79e1c57a 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -481,20 +481,18 @@ class WeightsLoader(torch.nn.Module): return (model_patcher, clip, vae, clipvision) -def load_unet(unet_path): #load unet in diffusers format - sd = comfy.utils.load_torch_file(unet_path) +def load_unet_state_dict(sd): #load unet in diffusers format parameters = comfy.utils.calculate_parameters(sd) unet_dtype = model_management.unet_dtype(model_params=parameters) if "input_blocks.0.0.weight" in sd: #ldm model_config = model_detection.model_config_from_unet(sd, "", unet_dtype) if model_config is None: - raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) + return None new_sd = sd else: #diffusers model_config = model_detection.model_config_from_diffusers_unet(sd, unet_dtype) if model_config is None: - print("ERROR UNSUPPORTED UNET", unet_path) return None diffusers_keys = comfy.utils.unet_to_diffusers(model_config.unet_config) @@ -514,6 +512,14 @@ def load_unet(unet_path): #load unet in diffusers format print("left over keys in unet:", left_over) return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device) +def load_unet(unet_path): + sd = comfy.utils.load_torch_file(unet_path) + model = load_unet_state_dict(sd) + if model is None: + print("ERROR UNSUPPORTED UNET", unet_path) + raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) + return model + def save_checkpoint(output_path, model, clip, vae, metadata=None): model_management.load_models_gpu([model, clip.load_model()]) sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd()) From 798a34d009cd78f02bd4c0b30f1c9fd6a594d345 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 04:57:59 -0500 Subject: [PATCH 252/420] Lower compress level for image preview. --- nodes.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index df40f809456..8b4a9b11932 100644 --- a/nodes.py +++ b/nodes.py @@ -1337,6 +1337,7 @@ def __init__(self): self.output_dir = folder_paths.get_output_directory() self.type = "output" self.prefix_append = "" + self.compress_level = 4 @classmethod def INPUT_TYPES(s): @@ -1370,7 +1371,7 @@ def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pngi metadata.add_text(x, json.dumps(extra_pnginfo[x])) file = f"{filename}_{counter:05}_.png" - img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=self.compress_level) results.append({ "filename": file, "subfolder": subfolder, @@ -1385,6 +1386,7 @@ def __init__(self): self.output_dir = folder_paths.get_temp_directory() self.type = "temp" self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 1 @classmethod def INPUT_TYPES(s): From 983ebc579212e209f52dff014b79bfe1932c0959 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 04:58:32 -0500 Subject: [PATCH 253/420] Use smart model management for VAE to decrease latency. 
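To see why this lowers latency, consider the toy cache below (not ComfyUI code): wrapping the VAE in a ModelPatcher lets the shared model manager keep it resident across calls, so repeated decodes stop paying a weight transfer each time, while eviction stays possible when memory runs short.

class ToyModelManager:
    # Toy stand-in for model_management.load_models_gpu: it only pays the
    # weight transfer on a cold load, not on every call.
    def __init__(self):
        self.resident = set()
        self.transfers = 0

    def load_models_gpu(self, models, memory_required=0):
        for m in models:
            if m not in self.resident:
                self.resident.add(m)   # real code would move weights here
                self.transfers += 1

mgr = ToyModelManager()
for _ in range(5):                 # five decode calls in a row
    mgr.load_models_gpu(["vae"])
assert mgr.transfers == 1          # the old code moved the VAE five times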
--- comfy/sd.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 53c79e1c57a..f4f84d0a032 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -187,10 +187,12 @@ def __init__(self, sd=None, device=None, config=None): if device is None: device = model_management.vae_device() self.device = device - self.offload_device = model_management.vae_offload_device() + offload_device = model_management.vae_offload_device() self.vae_dtype = model_management.vae_dtype() self.first_stage_model.to(self.vae_dtype) + self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device) + def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16): steps = samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x, tile_y, overlap) steps += samples.shape[0] * comfy.utils.get_tiled_scale_steps(samples.shape[3], samples.shape[2], tile_x // 2, tile_y * 2, overlap) @@ -219,10 +221,9 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): return samples def decode(self, samples_in): - self.first_stage_model = self.first_stage_model.to(self.device) try: memory_used = self.memory_used_decode(samples_in.shape, self.vae_dtype) - model_management.free_memory(memory_used, self.device) + model_management.load_models_gpu([self.patcher], memory_required=memory_used) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) @@ -235,22 +236,19 @@ def decode(self, samples_in): print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.") pixel_samples = self.decode_tiled_(samples_in) - self.first_stage_model = self.first_stage_model.to(self.offload_device) pixel_samples = pixel_samples.cpu().movedim(1,-1) return pixel_samples def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16): - self.first_stage_model = self.first_stage_model.to(self.device) + model_management.load_model_gpu(self.patcher) output = self.decode_tiled_(samples, tile_x, tile_y, overlap) - self.first_stage_model = self.first_stage_model.to(self.offload_device) return output.movedim(1,-1) def encode(self, pixel_samples): - self.first_stage_model = self.first_stage_model.to(self.device) pixel_samples = pixel_samples.movedim(-1,1) try: memory_used = self.memory_used_encode(pixel_samples.shape, self.vae_dtype) - model_management.free_memory(memory_used, self.device) + model_management.load_models_gpu([self.patcher], memory_required=memory_used) free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) @@ -263,14 +261,12 @@ def encode(self, pixel_samples): print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.") samples = self.encode_tiled_(pixel_samples) - self.first_stage_model = self.first_stage_model.to(self.offload_device) return samples def encode_tiled(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): - self.first_stage_model = self.first_stage_model.to(self.device) + model_management.load_model_gpu(self.patcher) pixel_samples = pixel_samples.movedim(-1,1) samples = self.encode_tiled_(pixel_samples, tile_x=tile_x, tile_y=tile_y, overlap=overlap) - self.first_stage_model = self.first_stage_model.to(self.offload_device) return samples def get_sd(self): From 21063fa35b53683f6ca01ccf1a5d5b509f702ba7 Mon Sep 17 
00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 11:01:05 -0500 Subject: [PATCH 254/420] Lower compress level of png sent on websocket. --- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index 1a8e92b8f96..9b1e3269d7f 100644 --- a/server.py +++ b/server.py @@ -576,7 +576,7 @@ async def send_image(self, image_data, sid=None): bytesIO = BytesIO() header = struct.pack(">I", type_num) bytesIO.write(header) - image.save(bytesIO, format=image_type, quality=95, compress_level=4) + image.save(bytesIO, format=image_type, quality=95, compress_level=1) preview_bytes = bytesIO.getvalue() await self.send_bytes(BinaryEventTypes.PREVIEW_IMAGE, preview_bytes, sid=sid) From 57d7f4464f2a40521666cc8436711f73bf728a97 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 13:35:32 -0500 Subject: [PATCH 255/420] Add SDTurboScheduler node. --- comfy_extras/nodes_custom_sampler.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index d3c1d4a23ee..008d0b8d6be 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -81,6 +81,25 @@ def get_sigmas(self, steps, sigma_max, sigma_min, rho): sigmas = k_diffusion_sampling.get_sigmas_polyexponential(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho) return (sigmas, ) +class SDTurboScheduler: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"model": ("MODEL",), + "steps": ("INT", {"default": 1, "min": 1, "max": 10}), + } + } + RETURN_TYPES = ("SIGMAS",) + CATEGORY = "sampling/custom_sampling/schedulers" + + FUNCTION = "get_sigmas" + + def get_sigmas(self, model, steps): + timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps] + sigmas = model.model.model_sampling.sigma(timesteps) + sigmas = torch.cat([sigmas, sigmas.new_zeros([1])]) + return (sigmas, ) + class VPScheduler: @classmethod def INPUT_TYPES(s): @@ -257,6 +276,7 @@ def sample(self, model, add_noise, noise_seed, cfg, positive, negative, sampler, "ExponentialScheduler": ExponentialScheduler, "PolyexponentialScheduler": PolyexponentialScheduler, "VPScheduler": VPScheduler, + "SDTurboScheduler": SDTurboScheduler, "KSamplerSelect": KSamplerSelect, "SamplerDPMPP_2M_SDE": SamplerDPMPP_2M_SDE, "SamplerDPMPP_SDE": SamplerDPMPP_SDE, From b911eefc4278b6069390d01a6ac9010ae6eecbac Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 14:20:56 -0500 Subject: [PATCH 256/420] Limit gc.collect() to once every 10 seconds. 
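The throttle is just a timestamp check; a standalone sketch follows, with the worker loop and the ComfyUI-specific soft_empty_cache() call stubbed out. The real code initializes last_gc_collect to 0; the offset here only guarantees that the first call in the example collects.

import gc
import time

GC_INTERVAL = 10.0  # seconds; matches the hardcoded 10.0 in the diff
last_gc_collect = time.perf_counter() - GC_INTERVAL - 1.0

def maybe_gc_collect():
    global last_gc_collect
    current_time = time.perf_counter()
    if (current_time - last_gc_collect) > GC_INTERVAL:
        gc.collect()  # the real worker also soft-empties the CUDA cache here
        last_gc_collect = current_time
        return True
    return False

assert maybe_gc_collect() is True    # first prompt after a quiet period collects
assert maybe_gc_collect() is False   # back-to-back prompts skip the collect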
--- main.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/main.py b/main.py index 1100a07f42a..3997fbefcb3 100644 --- a/main.py +++ b/main.py @@ -88,6 +88,7 @@ def cuda_malloc_warning(): def prompt_worker(q, server): e = execution.PromptExecutor(server) + last_gc_collect = 0 while True: item, item_id = q.get() execution_start_time = time.perf_counter() @@ -97,9 +98,14 @@ def prompt_worker(q, server): if server.client_id is not None: server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) - print("Prompt executed in {:.2f} seconds".format(time.perf_counter() - execution_start_time)) - gc.collect() - comfy.model_management.soft_empty_cache() + current_time = time.perf_counter() + execution_time = current_time - execution_start_time + print("Prompt executed in {:.2f} seconds".format(execution_time)) + if (current_time - last_gc_collect) > 10.0: + gc.collect() + comfy.model_management.soft_empty_cache() + last_gc_collect = current_time + print("gc collect") async def run(server, address='', port=8188, verbose=True, call_on_start=None): await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop()) From 777f6b15225197898a5f49742682a2be859072d7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 28 Nov 2023 14:45:00 -0500 Subject: [PATCH 257/420] Add to README that SDXL Turbo is supported. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 9d7e317907f..af1f2281158 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ This ui will let you design and execute advanced stable diffusion pipelines usin - [GLIGEN](https://comfyanonymous.github.io/ComfyUI_examples/gligen/) - [Model Merging](https://comfyanonymous.github.io/ComfyUI_examples/model_merging/) - [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/) +- [SDXL Turbo](https://comfyanonymous.github.io/ComfyUI_examples/sdturbo/) - Latent previews with [TAESD](#how-to-show-high-quality-previews) - Starts up very fast. - Works fully offline: will never download anything. From 7f469203b7b4547f1d0f7113d18095334fa06a4d Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Thu, 30 Nov 2023 19:13:27 +0000 Subject: [PATCH 258/420] Group nodes (#1776) * setup ui unit tests * Refactoring, adding connections * Few tweaks * Fix type * Add general test * Refactored and extended test * move to describe * for groups * wip group nodes * Relink nodes Fixed widget values Convert to nodes * Reconnect on convert back * add via node menu + canvas refactor * Add ws event handling * fix using wrong node on widget serialize * allow reroute pipe fix control_after_generate configure * allow multiple images * Add test for converted widgets on missing nodes + fix crash * tidy * mores tests + refactor * throw earlier to get less confusing error * support outputs * more test * add ci action * use lts node * Fix? * Prevent connecting non matching combos * update * accidently removed npm i * Disable logging extension * fix naming allow control_after_generate custom name allow convert from reroutes * group node tests * Add executing info, custom node icon Tidy * internal reroute just works * Fix crash on virtual nodes e.g. 
note * Save group nodes to templates * Fix template nodes not being stored * Fix aborting convert * tidy * Fix reconnecting output links on convert to group * Fix links on convert to nodes * Handle missing internal nodes * Trigger callback on text change * Apply value on connect * Fix converted widgets not reconnecting * Group node updates - persist internal ids in current session - copy widget values when converting to nodes - fix issue serializing converted inputs * Resolve issue with sanitized node name * Fix internal id * allow outputs to be used internally and externally * order widgets on group node various fixes * fix imageupload widget requiring a specific name * groupnode imageupload test give widget unique name * Fix issue with external node links * Add VAE model * Fix internal node id check * fix potential crash * wip widget input support * more wip group widget inputs * Group node refactor Support for primitives/converted widgets * Fix convert to nodes with internal reroutes * fix applying primitive * Fix control widget values * fix test --- .vscode/settings.json | 9 + tests-ui/setup.js | 1 + tests-ui/tests/groupNode.test.js | 818 ++++++++++++++++++++ tests-ui/tests/widgetInputs.test.js | 4 +- tests-ui/utils/ezgraph.js | 46 +- tests-ui/utils/index.js | 60 +- tests-ui/utils/setup.js | 20 +- web/extensions/core/groupNode.js | 1054 ++++++++++++++++++++++++++ web/extensions/core/nodeTemplates.js | 57 +- web/extensions/core/widgetInputs.js | 225 +++--- web/scripts/app.js | 322 ++++---- web/scripts/domWidget.js | 3 +- web/scripts/ui.js | 8 +- web/scripts/widgets.js | 144 +++- 14 files changed, 2416 insertions(+), 355 deletions(-) create mode 100644 .vscode/settings.json create mode 100644 tests-ui/tests/groupNode.test.js create mode 100644 web/extensions/core/groupNode.js diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000000..202121e10fc --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,9 @@ +{ + "path-intellisense.mappings": { + "../": "${workspaceFolder}/web/extensions/core" + }, + "[python]": { + "editor.defaultFormatter": "ms-python.autopep8" + }, + "python.formatting.provider": "none" +} diff --git a/tests-ui/setup.js b/tests-ui/setup.js index 0f368ab22f9..8bbd9dcdf20 100644 --- a/tests-ui/setup.js +++ b/tests-ui/setup.js @@ -20,6 +20,7 @@ async function setup() { // Modify the response data to add some checkpoints const objectInfo = JSON.parse(data); objectInfo.CheckpointLoaderSimple.input.required.ckpt_name[0] = ["model1.safetensors", "model2.ckpt"]; + objectInfo.VAELoader.input.required.vae_name[0] = ["vae1.safetensors", "vae2.ckpt"]; data = JSON.stringify(objectInfo, undefined, "\t"); diff --git a/tests-ui/tests/groupNode.test.js b/tests-ui/tests/groupNode.test.js new file mode 100644 index 00000000000..ce54c11542c --- /dev/null +++ b/tests-ui/tests/groupNode.test.js @@ -0,0 +1,818 @@ +// @ts-check +/// + +const { start, createDefaultWorkflow } = require("../utils"); +const lg = require("../utils/litegraph"); + +describe("group node", () => { + beforeEach(() => { + lg.setup(global); + }); + + afterEach(() => { + lg.teardown(global); + }); + + /** + * + * @param {*} app + * @param {*} graph + * @param {*} name + * @param {*} nodes + * @returns { Promise> } + */ + async function convertToGroup(app, graph, name, nodes) { + // Select the nodes we are converting + for (const n of nodes) { + n.select(true); + } + + expect(Object.keys(app.canvas.selected_nodes).sort((a, b) => +a - +b)).toEqual( + nodes.map((n) => n.id + "").sort((a, b) 
=> +a - +b) + ); + + global.prompt = jest.fn().mockImplementation(() => name); + const groupNode = await nodes[0].menu["Convert to Group Node"].call(false); + + // Check group name was requested + expect(window.prompt).toHaveBeenCalled(); + + // Ensure old nodes are removed + for (const n of nodes) { + expect(n.isRemoved).toBeTruthy(); + } + + expect(groupNode.type).toEqual("workflow/" + name); + + return graph.find(groupNode); + } + + /** + * @param { Record | number[] } idMap + * @param { Record> } valueMap + */ + function getOutput(idMap = {}, valueMap = {}) { + if (idMap instanceof Array) { + idMap = idMap.reduce((p, n) => { + p[n] = n + ""; + return p; + }, {}); + } + const expected = { + 1: { inputs: { ckpt_name: "model1.safetensors", ...valueMap?.[1] }, class_type: "CheckpointLoaderSimple" }, + 2: { inputs: { text: "positive", clip: ["1", 1], ...valueMap?.[2] }, class_type: "CLIPTextEncode" }, + 3: { inputs: { text: "negative", clip: ["1", 1], ...valueMap?.[3] }, class_type: "CLIPTextEncode" }, + 4: { inputs: { width: 512, height: 512, batch_size: 1, ...valueMap?.[4] }, class_type: "EmptyLatentImage" }, + 5: { + inputs: { + seed: 0, + steps: 20, + cfg: 8, + sampler_name: "euler", + scheduler: "normal", + denoise: 1, + model: ["1", 0], + positive: ["2", 0], + negative: ["3", 0], + latent_image: ["4", 0], + ...valueMap?.[5], + }, + class_type: "KSampler", + }, + 6: { inputs: { samples: ["5", 0], vae: ["1", 2], ...valueMap?.[6] }, class_type: "VAEDecode" }, + 7: { inputs: { filename_prefix: "ComfyUI", images: ["6", 0], ...valueMap?.[7] }, class_type: "SaveImage" }, + }; + + // Map old IDs to new at the top level + const mapped = {}; + for (const oldId in idMap) { + mapped[idMap[oldId]] = expected[oldId]; + delete expected[oldId]; + } + Object.assign(mapped, expected); + + // Map old IDs to new inside links + for (const k in mapped) { + for (const input in mapped[k].inputs) { + const v = mapped[k].inputs[input]; + if (v instanceof Array) { + if (v[0] in idMap) { + v[0] = idMap[v[0]] + ""; + } + } + } + } + + return mapped; + } + + test("can be created from selected nodes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg, nodes.empty]); + + // Ensure links are now to the group node + expect(group.inputs).toHaveLength(2); + expect(group.outputs).toHaveLength(3); + + expect(group.inputs.map((i) => i.input.name)).toEqual(["clip", "CLIPTextEncode clip"]); + expect(group.outputs.map((i) => i.output.name)).toEqual(["LATENT", "CONDITIONING", "CLIPTextEncode CONDITIONING"]); + + // ckpt clip to both clip inputs on the group + expect(nodes.ckpt.outputs.CLIP.connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [group.id, 0], + [group.id, 1], + ]); + + // group conditioning to sampler + expect(group.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 1], + ]); + // group conditioning 2 to sampler + expect( + group.outputs["CLIPTextEncode CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index]) + ).toEqual([[nodes.sampler.id, 2]]); + // group latent to sampler + expect(group.outputs["LATENT"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 3], + ]); + }); + + test("maintains all output links on conversion", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, 
graph); + const save2 = ez.SaveImage(...nodes.decode.outputs); + const save3 = ez.SaveImage(...nodes.decode.outputs); + // Ensure an output with multiple links maintains them on convert to group + const group = await convertToGroup(app, graph, "test", [nodes.sampler, nodes.decode]); + expect(group.outputs[0].connections.length).toBe(3); + expect(group.outputs[0].connections[0].targetNode.id).toBe(nodes.save.id); + expect(group.outputs[0].connections[1].targetNode.id).toBe(save2.id); + expect(group.outputs[0].connections[2].targetNode.id).toBe(save3.id); + + // and they're still linked when converting back to nodes + const newNodes = group.menu["Convert to nodes"].call(); + const decode = graph.find(newNodes.find((n) => n.type === "VAEDecode")); + expect(decode.outputs[0].connections.length).toBe(3); + expect(decode.outputs[0].connections[0].targetNode.id).toBe(nodes.save.id); + expect(decode.outputs[0].connections[1].targetNode.id).toBe(save2.id); + expect(decode.outputs[0].connections[2].targetNode.id).toBe(save3.id); + }); + test("can be be converted back to nodes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const toConvert = [nodes.pos, nodes.neg, nodes.empty, nodes.sampler]; + const group = await convertToGroup(app, graph, "test", toConvert); + + // Edit some values to ensure they are set back onto the converted nodes + expect(group.widgets["text"].value).toBe("positive"); + group.widgets["text"].value = "pos"; + expect(group.widgets["CLIPTextEncode text"].value).toBe("negative"); + group.widgets["CLIPTextEncode text"].value = "neg"; + expect(group.widgets["width"].value).toBe(512); + group.widgets["width"].value = 1024; + expect(group.widgets["sampler_name"].value).toBe("euler"); + group.widgets["sampler_name"].value = "ddim"; + expect(group.widgets["control_after_generate"].value).toBe("randomize"); + group.widgets["control_after_generate"].value = "fixed"; + + /** @type { Array } */ + group.menu["Convert to nodes"].call(); + + // ensure widget values are set + const pos = graph.find(nodes.pos.id); + expect(pos.node.type).toBe("CLIPTextEncode"); + expect(pos.widgets["text"].value).toBe("pos"); + const neg = graph.find(nodes.neg.id); + expect(neg.node.type).toBe("CLIPTextEncode"); + expect(neg.widgets["text"].value).toBe("neg"); + const empty = graph.find(nodes.empty.id); + expect(empty.node.type).toBe("EmptyLatentImage"); + expect(empty.widgets["width"].value).toBe(1024); + const sampler = graph.find(nodes.sampler.id); + expect(sampler.node.type).toBe("KSampler"); + expect(sampler.widgets["sampler_name"].value).toBe("ddim"); + expect(sampler.widgets["control_after_generate"].value).toBe("fixed"); + + // validate links + expect(nodes.ckpt.outputs.CLIP.connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [pos.id, 0], + [neg.id, 0], + ]); + + expect(pos.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 1], + ]); + + expect(neg.outputs["CONDITIONING"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 2], + ]); + + expect(empty.outputs["LATENT"].connections.map((t) => [t.targetNode.id, t.targetInput.index])).toEqual([ + [nodes.sampler.id, 3], + ]); + }); + test("it can embed reroutes as inputs", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + // Add and connect a reroute to the clip text encodes + const reroute = 
ez.Reroute(); + nodes.ckpt.outputs.CLIP.connectTo(reroute.inputs[0]); + reroute.outputs[0].connectTo(nodes.pos.inputs[0]); + reroute.outputs[0].connectTo(nodes.neg.inputs[0]); + + // Convert to group and ensure we only have 1 input of the correct type + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg, nodes.empty, reroute]); + expect(group.inputs).toHaveLength(1); + expect(group.inputs[0].input.type).toEqual("CLIP"); + + expect((await graph.toPrompt()).output).toEqual(getOutput()); + }); + test("it can embed reroutes as outputs", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + // Add a reroute with no output so we output IMAGE even though its used internally + const reroute = ez.Reroute(); + nodes.decode.outputs.IMAGE.connectTo(reroute.inputs[0]); + + // Convert to group and ensure there is an IMAGE output + const group = await convertToGroup(app, graph, "test", [nodes.decode, nodes.save, reroute]); + expect(group.outputs).toHaveLength(1); + expect(group.outputs[0].output.type).toEqual("IMAGE"); + expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.decode.id, nodes.save.id])); + }); + test("it can embed reroutes as pipes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + // Use reroutes as a pipe + const rerouteModel = ez.Reroute(); + const rerouteClip = ez.Reroute(); + const rerouteVae = ez.Reroute(); + nodes.ckpt.outputs.MODEL.connectTo(rerouteModel.inputs[0]); + nodes.ckpt.outputs.CLIP.connectTo(rerouteClip.inputs[0]); + nodes.ckpt.outputs.VAE.connectTo(rerouteVae.inputs[0]); + + const group = await convertToGroup(app, graph, "test", [rerouteModel, rerouteClip, rerouteVae]); + + expect(group.outputs).toHaveLength(3); + expect(group.outputs.map((o) => o.output.type)).toEqual(["MODEL", "CLIP", "VAE"]); + + expect(group.outputs).toHaveLength(3); + expect(group.outputs.map((o) => o.output.type)).toEqual(["MODEL", "CLIP", "VAE"]); + + group.outputs[0].connectTo(nodes.sampler.inputs.model); + group.outputs[1].connectTo(nodes.pos.inputs.clip); + group.outputs[1].connectTo(nodes.neg.inputs.clip); + }); + test("can handle reroutes used internally", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + let reroutes = []; + let prevNode = nodes.ckpt; + for(let i = 0; i < 5; i++) { + const reroute = ez.Reroute(); + prevNode.outputs[0].connectTo(reroute.inputs[0]); + prevNode = reroute; + reroutes.push(reroute); + } + prevNode.outputs[0].connectTo(nodes.sampler.inputs.model); + + const group = await convertToGroup(app, graph, "test", [...reroutes, ...Object.values(nodes)]); + expect((await graph.toPrompt()).output).toEqual(getOutput()); + + group.menu["Convert to nodes"].call(); + expect((await graph.toPrompt()).output).toEqual(getOutput()); + }); + test("creates with widget values from inner nodes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + nodes.ckpt.widgets.ckpt_name.value = "model2.ckpt"; + nodes.pos.widgets.text.value = "hello"; + nodes.neg.widgets.text.value = "world"; + nodes.empty.widgets.width.value = 256; + nodes.empty.widgets.height.value = 1024; + nodes.sampler.widgets.seed.value = 1; + nodes.sampler.widgets.control_after_generate.value = "increment"; + nodes.sampler.widgets.steps.value = 8; + nodes.sampler.widgets.cfg.value = 4.5; + nodes.sampler.widgets.sampler_name.value = "uni_pc"; + 
nodes.sampler.widgets.scheduler.value = "karras"; + nodes.sampler.widgets.denoise.value = 0.9; + + const group = await convertToGroup(app, graph, "test", [ + nodes.ckpt, + nodes.pos, + nodes.neg, + nodes.empty, + nodes.sampler, + ]); + + expect(group.widgets["ckpt_name"].value).toEqual("model2.ckpt"); + expect(group.widgets["text"].value).toEqual("hello"); + expect(group.widgets["CLIPTextEncode text"].value).toEqual("world"); + expect(group.widgets["width"].value).toEqual(256); + expect(group.widgets["height"].value).toEqual(1024); + expect(group.widgets["seed"].value).toEqual(1); + expect(group.widgets["control_after_generate"].value).toEqual("increment"); + expect(group.widgets["steps"].value).toEqual(8); + expect(group.widgets["cfg"].value).toEqual(4.5); + expect(group.widgets["sampler_name"].value).toEqual("uni_pc"); + expect(group.widgets["scheduler"].value).toEqual("karras"); + expect(group.widgets["denoise"].value).toEqual(0.9); + + expect((await graph.toPrompt()).output).toEqual( + getOutput([nodes.ckpt.id, nodes.pos.id, nodes.neg.id, nodes.empty.id, nodes.sampler.id], { + [nodes.ckpt.id]: { ckpt_name: "model2.ckpt" }, + [nodes.pos.id]: { text: "hello" }, + [nodes.neg.id]: { text: "world" }, + [nodes.empty.id]: { width: 256, height: 1024 }, + [nodes.sampler.id]: { + seed: 1, + steps: 8, + cfg: 4.5, + sampler_name: "uni_pc", + scheduler: "karras", + denoise: 0.9, + }, + }) + ); + }); + test("group inputs can be reroutes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + + const reroute = ez.Reroute(); + nodes.ckpt.outputs.CLIP.connectTo(reroute.inputs[0]); + + reroute.outputs[0].connectTo(group.inputs[0]); + reroute.outputs[0].connectTo(group.inputs[1]); + + expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.pos.id, nodes.neg.id])); + }); + test("group outputs can be reroutes", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + + const reroute1 = ez.Reroute(); + const reroute2 = ez.Reroute(); + group.outputs[0].connectTo(reroute1.inputs[0]); + group.outputs[1].connectTo(reroute2.inputs[0]); + + reroute1.outputs[0].connectTo(nodes.sampler.inputs.positive); + reroute2.outputs[0].connectTo(nodes.sampler.inputs.negative); + + expect((await graph.toPrompt()).output).toEqual(getOutput([nodes.pos.id, nodes.neg.id])); + }); + test("groups can connect to each other", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group1 = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + const group2 = await convertToGroup(app, graph, "test2", [nodes.empty, nodes.sampler]); + + group1.outputs[0].connectTo(group2.inputs["positive"]); + group1.outputs[1].connectTo(group2.inputs["negative"]); + + expect((await graph.toPrompt()).output).toEqual( + getOutput([nodes.pos.id, nodes.neg.id, nodes.empty.id, nodes.sampler.id]) + ); + }); + test("displays generated image on group node", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + let group = await convertToGroup(app, graph, "test", [ + nodes.pos, + nodes.neg, + nodes.empty, + nodes.sampler, + nodes.decode, + nodes.save, + ]); + + const { api } = require("../../web/scripts/api"); + + api.dispatchEvent(new 
CustomEvent("execution_start", {})); + api.dispatchEvent(new CustomEvent("executing", { detail: `${nodes.save.id}` })); + // Event should be forwarded to group node id + expect(+app.runningNodeId).toEqual(group.id); + expect(group.node["imgs"]).toBeFalsy(); + api.dispatchEvent( + new CustomEvent("executed", { + detail: { + node: `${nodes.save.id}`, + output: { + images: [ + { + filename: "test.png", + type: "output", + }, + ], + }, + }, + }) + ); + + // Trigger paint + group.node.onDrawBackground?.(app.canvas.ctx, app.canvas.canvas); + + expect(group.node["images"]).toEqual([ + { + filename: "test.png", + type: "output", + }, + ]); + + // Reload + const workflow = JSON.stringify((await graph.toPrompt()).workflow); + await app.loadGraphData(JSON.parse(workflow)); + group = graph.find(group); + + // Trigger inner nodes to get created + group.node["getInnerNodes"](); + + // Check it works for internal node ids + api.dispatchEvent(new CustomEvent("execution_start", {})); + api.dispatchEvent(new CustomEvent("executing", { detail: `${group.id}:5` })); + // Event should be forwarded to group node id + expect(+app.runningNodeId).toEqual(group.id); + expect(group.node["imgs"]).toBeFalsy(); + api.dispatchEvent( + new CustomEvent("executed", { + detail: { + node: `${group.id}:5`, + output: { + images: [ + { + filename: "test2.png", + type: "output", + }, + ], + }, + }, + }) + ); + + // Trigger paint + group.node.onDrawBackground?.(app.canvas.ctx, app.canvas.canvas); + + expect(group.node["images"]).toEqual([ + { + filename: "test2.png", + type: "output", + }, + ]); + }); + test("allows widgets to be converted to inputs", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + const group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + group.widgets[0].convertToInput(); + + const primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(group.inputs["text"]); + primitive.widgets[0].value = "hello"; + + expect((await graph.toPrompt()).output).toEqual( + getOutput([nodes.pos.id, nodes.neg.id], { + [nodes.pos.id]: { text: "hello" }, + }) + ); + }); + test("can be copied", async () => { + const { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + const group1 = await convertToGroup(app, graph, "test", [ + nodes.pos, + nodes.neg, + nodes.empty, + nodes.sampler, + nodes.decode, + nodes.save, + ]); + + group1.widgets["text"].value = "hello"; + group1.widgets["width"].value = 256; + group1.widgets["seed"].value = 1; + + // Clone the node + group1.menu.Clone.call(); + expect(app.graph._nodes).toHaveLength(3); + const group2 = graph.find(app.graph._nodes[2]); + expect(group2.node.type).toEqual("workflow/test"); + expect(group2.id).not.toEqual(group1.id); + + // Reconnect ckpt + nodes.ckpt.outputs.MODEL.connectTo(group2.inputs["model"]); + nodes.ckpt.outputs.CLIP.connectTo(group2.inputs["clip"]); + nodes.ckpt.outputs.CLIP.connectTo(group2.inputs["CLIPTextEncode clip"]); + nodes.ckpt.outputs.VAE.connectTo(group2.inputs["vae"]); + + group2.widgets["text"].value = "world"; + group2.widgets["width"].value = 1024; + group2.widgets["seed"].value = 100; + + let i = 0; + expect((await graph.toPrompt()).output).toEqual({ + ...getOutput([nodes.empty.id, nodes.pos.id, nodes.neg.id, nodes.sampler.id, nodes.decode.id, nodes.save.id], { + [nodes.empty.id]: { width: 256 }, + [nodes.pos.id]: { text: "hello" }, + [nodes.sampler.id]: { seed: 1 }, + }), + ...getOutput( + { + [nodes.empty.id]: 
`${group2.id}:${i++}`, + [nodes.pos.id]: `${group2.id}:${i++}`, + [nodes.neg.id]: `${group2.id}:${i++}`, + [nodes.sampler.id]: `${group2.id}:${i++}`, + [nodes.decode.id]: `${group2.id}:${i++}`, + [nodes.save.id]: `${group2.id}:${i++}`, + }, + { + [nodes.empty.id]: { width: 1024 }, + [nodes.pos.id]: { text: "world" }, + [nodes.sampler.id]: { seed: 100 }, + } + ), + }); + + graph.arrange(); + }); + test("is embedded in workflow", async () => { + let { ez, graph, app } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + let group = await convertToGroup(app, graph, "test", [nodes.pos, nodes.neg]); + const workflow = JSON.stringify((await graph.toPrompt()).workflow); + + // Clear the environment + ({ ez, graph, app } = await start({ + resetEnv: true, + })); + // Ensure the node isnt registered + expect(() => ez["workflow/test"]).toThrow(); + + // Reload the workflow + await app.loadGraphData(JSON.parse(workflow)); + + // Ensure the node is found + group = graph.find(group); + + // Generate prompt and ensure it is as expected + expect((await graph.toPrompt()).output).toEqual( + getOutput({ + [nodes.pos.id]: `${group.id}:0`, + [nodes.neg.id]: `${group.id}:1`, + }) + ); + }); + test("shows missing node error on missing internal node when loading graph data", async () => { + const { graph } = await start(); + + const dialogShow = jest.spyOn(graph.app.ui.dialog, "show"); + await graph.app.loadGraphData({ + last_node_id: 3, + last_link_id: 1, + nodes: [ + { + id: 3, + type: "workflow/testerror", + }, + ], + links: [], + groups: [], + config: {}, + extra: { + groupNodes: { + testerror: { + nodes: [ + { + type: "NotKSampler", + }, + { + type: "NotVAEDecode", + }, + ], + }, + }, + }, + }); + + expect(dialogShow).toBeCalledTimes(1); + const call = dialogShow.mock.calls[0][0].innerHTML; + expect(call).toContain("the following node types were not found"); + expect(call).toContain("NotKSampler"); + expect(call).toContain("NotVAEDecode"); + expect(call).toContain("workflow/testerror"); + }); + test("maintains widget inputs on conversion back to nodes", async () => { + const { ez, graph, app } = await start(); + let pos = ez.CLIPTextEncode({ text: "positive" }); + pos.node.title = "Positive"; + let neg = ez.CLIPTextEncode({ text: "negative" }); + neg.node.title = "Negative"; + pos.widgets.text.convertToInput(); + neg.widgets.text.convertToInput(); + + let primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(pos.inputs.text); + primitive.outputs[0].connectTo(neg.inputs.text); + + const group = await convertToGroup(app, graph, "test", [pos, neg, primitive]); + // This will use a primitive widget named 'value' + expect(group.widgets.length).toBe(1); + expect(group.widgets["value"].value).toBe("positive"); + + const newNodes = group.menu["Convert to nodes"].call(); + pos = graph.find(newNodes.find((n) => n.title === "Positive")); + neg = graph.find(newNodes.find((n) => n.title === "Negative")); + primitive = graph.find(newNodes.find((n) => n.type === "PrimitiveNode")); + + expect(pos.inputs).toHaveLength(2); + expect(neg.inputs).toHaveLength(2); + expect(primitive.outputs[0].connections).toHaveLength(2); + + expect((await graph.toPrompt()).output).toEqual({ + 1: { inputs: { text: "positive" }, class_type: "CLIPTextEncode" }, + 2: { inputs: { text: "positive" }, class_type: "CLIPTextEncode" }, + }); + }); + test("adds widgets in node execution order", async () => { + const { ez, graph, app } = await start(); + const scale = ez.LatentUpscale(); + const save = ez.SaveImage(); + const 
empty = ez.EmptyLatentImage(); + const decode = ez.VAEDecode(); + + scale.outputs.LATENT.connectTo(decode.inputs.samples); + decode.outputs.IMAGE.connectTo(save.inputs.images); + empty.outputs.LATENT.connectTo(scale.inputs.samples); + + const group = await convertToGroup(app, graph, "test", [scale, save, empty, decode]); + const widgets = group.widgets.map((w) => w.widget.name); + expect(widgets).toStrictEqual([ + "width", + "height", + "batch_size", + "upscale_method", + "LatentUpscale width", + "LatentUpscale height", + "crop", + "filename_prefix", + ]); + }); + test("adds output for external links when converting to group", async () => { + const { ez, graph, app } = await start(); + const img = ez.EmptyLatentImage(); + let decode = ez.VAEDecode(...img.outputs); + const preview1 = ez.PreviewImage(...decode.outputs); + const preview2 = ez.PreviewImage(...decode.outputs); + + const group = await convertToGroup(app, graph, "test", [img, decode, preview1]); + + // Ensure we have an output connected to the 2nd preview node + expect(group.outputs.length).toBe(1); + expect(group.outputs[0].connections.length).toBe(1); + expect(group.outputs[0].connections[0].targetNode.id).toBe(preview2.id); + + // Convert back and ensure bothe previews are still connected + group.menu["Convert to nodes"].call(); + decode = graph.find(decode); + expect(decode.outputs[0].connections.length).toBe(2); + expect(decode.outputs[0].connections[0].targetNode.id).toBe(preview1.id); + expect(decode.outputs[0].connections[1].targetNode.id).toBe(preview2.id); + }); + test("adds output for external links when converting to group when nodes are not in execution order", async () => { + const { ez, graph, app } = await start(); + const sampler = ez.KSampler(); + const ckpt = ez.CheckpointLoaderSimple(); + const empty = ez.EmptyLatentImage(); + const pos = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "positive" }); + const neg = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "negative" }); + const decode1 = ez.VAEDecode(sampler.outputs.LATENT, ckpt.outputs.VAE); + const save = ez.SaveImage(decode1.outputs.IMAGE); + ckpt.outputs.MODEL.connectTo(sampler.inputs.model); + pos.outputs.CONDITIONING.connectTo(sampler.inputs.positive); + neg.outputs.CONDITIONING.connectTo(sampler.inputs.negative); + empty.outputs.LATENT.connectTo(sampler.inputs.latent_image); + + const encode = ez.VAEEncode(decode1.outputs.IMAGE); + const vae = ez.VAELoader(); + const decode2 = ez.VAEDecode(encode.outputs.LATENT, vae.outputs.VAE); + const preview = ez.PreviewImage(decode2.outputs.IMAGE); + vae.outputs.VAE.connectTo(encode.inputs.vae); + + const group = await convertToGroup(app, graph, "test", [vae, decode1, encode, sampler]); + + expect(group.outputs.length).toBe(3); + expect(group.outputs[0].output.name).toBe("VAE"); + expect(group.outputs[0].output.type).toBe("VAE"); + expect(group.outputs[1].output.name).toBe("IMAGE"); + expect(group.outputs[1].output.type).toBe("IMAGE"); + expect(group.outputs[2].output.name).toBe("LATENT"); + expect(group.outputs[2].output.type).toBe("LATENT"); + + expect(group.outputs[0].connections.length).toBe(1); + expect(group.outputs[0].connections[0].targetNode.id).toBe(decode2.id); + expect(group.outputs[0].connections[0].targetInput.index).toBe(1); + + expect(group.outputs[1].connections.length).toBe(1); + expect(group.outputs[1].connections[0].targetNode.id).toBe(save.id); + expect(group.outputs[1].connections[0].targetInput.index).toBe(0); + + expect(group.outputs[2].connections.length).toBe(1); + 
expect(group.outputs[2].connections[0].targetNode.id).toBe(decode2.id); + expect(group.outputs[2].connections[0].targetInput.index).toBe(0); + + expect((await graph.toPrompt()).output).toEqual({ + ...getOutput({ 1: ckpt.id, 2: pos.id, 3: neg.id, 4: empty.id, 5: sampler.id, 6: decode1.id, 7: save.id }), + [vae.id]: { inputs: { vae_name: "vae1.safetensors" }, class_type: vae.node.type }, + [encode.id]: { inputs: { pixels: ["6", 0], vae: [vae.id + "", 0] }, class_type: encode.node.type }, + [decode2.id]: { inputs: { samples: [encode.id + "", 0], vae: [vae.id + "", 0] }, class_type: decode2.node.type }, + [preview.id]: { inputs: { images: [decode2.id + "", 0] }, class_type: preview.node.type }, + }); + }); + test("works with IMAGEUPLOAD widget", async () => { + const { ez, graph, app } = await start(); + const img = ez.LoadImage(); + const preview1 = ez.PreviewImage(img.outputs[0]); + + const group = await convertToGroup(app, graph, "test", [img, preview1]); + const widget = group.widgets["upload"]; + expect(widget).toBeTruthy(); + expect(widget.widget.type).toBe("button"); + }); + test("internal primitive populates widgets for all linked inputs", async () => { + const { ez, graph, app } = await start(); + const img = ez.LoadImage(); + const scale1 = ez.ImageScale(img.outputs[0]); + const scale2 = ez.ImageScale(img.outputs[0]); + ez.PreviewImage(scale1.outputs[0]); + ez.PreviewImage(scale2.outputs[0]); + + scale1.widgets.width.convertToInput(); + scale2.widgets.height.convertToInput(); + + const primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(scale1.inputs.width); + primitive.outputs[0].connectTo(scale2.inputs.height); + + const group = await convertToGroup(app, graph, "test", [img, primitive, scale1, scale2]); + group.widgets.value.value = 100; + expect((await graph.toPrompt()).output).toEqual({ + 1: { + inputs: { image: img.widgets.image.value, upload: "image" }, + class_type: "LoadImage", + }, + 2: { + inputs: { upscale_method: "nearest-exact", width: 100, height: 512, crop: "disabled", image: ["1", 0] }, + class_type: "ImageScale", + }, + 3: { + inputs: { upscale_method: "nearest-exact", width: 512, height: 100, crop: "disabled", image: ["1", 0] }, + class_type: "ImageScale", + }, + 4: { inputs: { images: ["2", 0] }, class_type: "PreviewImage" }, + 5: { inputs: { images: ["3", 0] }, class_type: "PreviewImage" }, + }); + }); + test("primitive control widgets values are copied on convert", async () => { + const { ez, graph, app } = await start(); + const sampler = ez.KSampler(); + sampler.widgets.seed.convertToInput(); + sampler.widgets.sampler_name.convertToInput(); + + let p1 = ez.PrimitiveNode(); + let p2 = ez.PrimitiveNode(); + p1.outputs[0].connectTo(sampler.inputs.seed); + p2.outputs[0].connectTo(sampler.inputs.sampler_name); + + p1.widgets.control_after_generate.value = "increment"; + p2.widgets.control_after_generate.value = "decrement"; + p2.widgets.control_filter_list.value = "/.*/"; + + p2.node.title = "p2"; + + const group = await convertToGroup(app, graph, "test", [sampler, p1, p2]); + expect(group.widgets.control_after_generate.value).toBe("increment"); + expect(group.widgets["p2 control_after_generate"].value).toBe("decrement"); + expect(group.widgets["p2 control_filter_list"].value).toBe("/.*/"); + + group.widgets.control_after_generate.value = "fixed"; + group.widgets["p2 control_after_generate"].value = "randomize"; + group.widgets["p2 control_filter_list"].value = "/.+/"; + + group.menu["Convert to nodes"].call(); + p1 = graph.find(p1); + p2 = 
graph.find(p2); + + expect(p1.widgets.control_after_generate.value).toBe("fixed"); + expect(p2.widgets.control_after_generate.value).toBe("randomize"); + expect(p2.widgets.control_filter_list.value).toBe("/.+/"); + }); +}); diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js index e1873105acc..8e191adf043 100644 --- a/tests-ui/tests/widgetInputs.test.js +++ b/tests-ui/tests/widgetInputs.test.js @@ -202,8 +202,8 @@ describe("widget inputs", () => { }); expect(dialogShow).toBeCalledTimes(1); - expect(dialogShow.mock.calls[0][0]).toContain("the following node types were not found"); - expect(dialogShow.mock.calls[0][0]).toContain("TestNode"); + expect(dialogShow.mock.calls[0][0].innerHTML).toContain("the following node types were not found"); + expect(dialogShow.mock.calls[0][0].innerHTML).toContain("TestNode"); }); test("defaultInput widgets can be converted back to inputs", async () => { diff --git a/tests-ui/utils/ezgraph.js b/tests-ui/utils/ezgraph.js index 0e81fd47beb..898b82db051 100644 --- a/tests-ui/utils/ezgraph.js +++ b/tests-ui/utils/ezgraph.js @@ -150,7 +150,7 @@ export class EzNodeMenuItem { if (selectNode) { this.node.select(); } - this.item.callback.call(this.node.node, undefined, undefined, undefined, undefined, this.node.node); + return this.item.callback.call(this.node.node, undefined, undefined, undefined, undefined, this.node.node); } } @@ -240,8 +240,12 @@ export class EzNode { return this.#makeLookupArray(() => this.app.canvas.getNodeMenuOptions(this.node), "content", EzNodeMenuItem); } - select() { - this.app.canvas.selectNode(this.node); + get isRemoved() { + return !this.app.graph.getNodeById(this.id); + } + + select(addToSelection = false) { + this.app.canvas.selectNode(this.node, addToSelection); } // /** @@ -275,12 +279,17 @@ export class EzNode { if (!s) return p; const name = s[nameProperty]; + const item = new ctor(this, i, s); // @ts-ignore - if (!name || name in p) { - throw new Error(`Unable to store ${nodeProperty} ${name} on array as name conflicts.`); + p.push(item); + if (name) { + // @ts-ignore + if (name in p) { + throw new Error(`Unable to store ${nodeProperty} ${name} on array as name conflicts.`); + } } // @ts-ignore - p.push((p[name] = new ctor(this, i, s))); + p[name] = item; return p; }, Object.assign([], { $: this })); } @@ -348,6 +357,19 @@ export class EzGraph { }, 10); }); } + + /** + * @returns { Promise<{ + * workflow: {}, + * output: Record + * }>}> } + */ + toPrompt() { + // @ts-ignore + return this.app.graphToPrompt(); + } } export const Ez = { @@ -356,12 +378,12 @@ export const Ez = { * @example * const { ez, graph } = Ez.graph(app); * graph.clear(); - * const [model, clip, vae] = ez.CheckpointLoaderSimple(); - * const [pos] = ez.CLIPTextEncode(clip, { text: "positive" }); - * const [neg] = ez.CLIPTextEncode(clip, { text: "negative" }); - * const [latent] = ez.KSampler(model, pos, neg, ...ez.EmptyLatentImage()); - * const [image] = ez.VAEDecode(latent, vae); - * const saveNode = ez.SaveImage(image).node; + * const [model, clip, vae] = ez.CheckpointLoaderSimple().outputs; + * const [pos] = ez.CLIPTextEncode(clip, { text: "positive" }).outputs; + * const [neg] = ez.CLIPTextEncode(clip, { text: "negative" }).outputs; + * const [latent] = ez.KSampler(model, pos, neg, ...ez.EmptyLatentImage().outputs).outputs; + * const [image] = ez.VAEDecode(latent, vae).outputs; + * const saveNode = ez.SaveImage(image); * console.log(saveNode); * graph.arrange(); * @param { app } app diff --git a/tests-ui/utils/index.js 
b/tests-ui/utils/index.js index 01c58b21f5c..eeccdb3d921 100644 --- a/tests-ui/utils/index.js +++ b/tests-ui/utils/index.js @@ -1,21 +1,28 @@ const { mockApi } = require("./setup"); const { Ez } = require("./ezgraph"); +const lg = require("./litegraph"); /** * - * @param { Parameters[0] } config + * @param { Parameters[0] & { resetEnv?: boolean } } config * @returns */ export async function start(config = undefined) { + if(config?.resetEnv) { + jest.resetModules(); + jest.resetAllMocks(); + lg.setup(global); + } + mockApi(config); const { app } = require("../../web/scripts/app"); await app.setup(); - return Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]); + return { ...Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]), app }; } /** - * @param { ReturnType["graph"] } graph - * @param { (hasReloaded: boolean) => (Promise | void) } cb + * @param { ReturnType["graph"] } graph + * @param { (hasReloaded: boolean) => (Promise | void) } cb */ export async function checkBeforeAndAfterReload(graph, cb) { await cb(false); @@ -24,10 +31,10 @@ export async function checkBeforeAndAfterReload(graph, cb) { } /** - * @param { string } name - * @param { Record } input + * @param { string } name + * @param { Record } input * @param { (string | string[])[] | Record } output - * @returns { Record } + * @returns { Record } */ export function makeNodeDef(name, input, output = {}) { const nodeDef = { @@ -37,19 +44,19 @@ export function makeNodeDef(name, input, output = {}) { output_name: [], output_is_list: [], input: { - required: {} + required: {}, }, }; - for(const k in input) { + for (const k in input) { nodeDef.input.required[k] = typeof input[k] === "string" ? [input[k], {}] : [...input[k]]; } - if(output instanceof Array) { + if (output instanceof Array) { output = output.reduce((p, c) => { p[c] = c; return p; - }, {}) + }, {}); } - for(const k in output) { + for (const k in output) { nodeDef.output.push(output[k]); nodeDef.output_name.push(k); nodeDef.output_is_list.push(false); @@ -68,4 +75,31 @@ export function assertNotNullOrUndefined(x) { expect(x).not.toEqual(null); expect(x).not.toEqual(undefined); return true; -} \ No newline at end of file +} + +/** + * + * @param { ReturnType["ez"] } ez + * @param { ReturnType["graph"] } graph + */ +export function createDefaultWorkflow(ez, graph) { + graph.clear(); + const ckpt = ez.CheckpointLoaderSimple(); + + const pos = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "positive" }); + const neg = ez.CLIPTextEncode(ckpt.outputs.CLIP, { text: "negative" }); + + const empty = ez.EmptyLatentImage(); + const sampler = ez.KSampler( + ckpt.outputs.MODEL, + pos.outputs.CONDITIONING, + neg.outputs.CONDITIONING, + empty.outputs.LATENT + ); + + const decode = ez.VAEDecode(sampler.outputs.LATENT, ckpt.outputs.VAE); + const save = ez.SaveImage(decode.outputs.IMAGE); + graph.arrange(); + + return { ckpt, pos, neg, empty, sampler, decode, save }; +} diff --git a/tests-ui/utils/setup.js b/tests-ui/utils/setup.js index 17e8ac1ad28..dd150214a34 100644 --- a/tests-ui/utils/setup.js +++ b/tests-ui/utils/setup.js @@ -30,16 +30,20 @@ export function mockApi({ mockExtensions, mockNodeDefs } = {}) { mockNodeDefs = JSON.parse(fs.readFileSync(path.resolve("./data/object_info.json"))); } + const events = new EventTarget(); + const mockApi = { + addEventListener: events.addEventListener.bind(events), + removeEventListener: events.removeEventListener.bind(events), + dispatchEvent: events.dispatchEvent.bind(events), + getSystemStats: jest.fn(), + getExtensions: jest.fn(() 
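/* Because the mocked api is now backed by a real EventTarget, tests can feed
   server-style events through it; a sketch (payload shapes assumed from the
   listeners in app.js):

   const { api } = require("../../web/scripts/api");
   api.dispatchEvent(new CustomEvent("executing", { detail: "5" }));
   api.dispatchEvent(new CustomEvent("executed", {
     detail: { node: "5", output: { images: [{ filename: "out.png" }] } },
   }));
*/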
=> mockExtensions), + getNodeDefs: jest.fn(() => mockNodeDefs), + init: jest.fn(), + apiURL: jest.fn((x) => "../../web/" + x), + }; jest.mock("../../web/scripts/api", () => ({ get api() { - return { - addEventListener: jest.fn(), - getSystemStats: jest.fn(), - getExtensions: jest.fn(() => mockExtensions), - getNodeDefs: jest.fn(() => mockNodeDefs), - init: jest.fn(), - apiURL: jest.fn((x) => "../../web/" + x), - }; + return mockApi; }, })); } diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js new file mode 100644 index 00000000000..450b4f5f35c --- /dev/null +++ b/web/extensions/core/groupNode.js @@ -0,0 +1,1054 @@ +import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; +import { getWidgetType } from "../../scripts/widgets.js"; +import { mergeIfValid } from "./widgetInputs.js"; + +const GROUP = Symbol(); + +const Workflow = { + InUse: { + Free: 0, + Registered: 1, + InWorkflow: 2, + }, + isInUseGroupNode(name) { + const id = `workflow/${name}`; + // Check if lready registered/in use in this workflow + if (app.graph.extra?.groupNodes?.[name]) { + if (app.graph._nodes.find((n) => n.type === id)) { + return Workflow.InUse.InWorkflow; + } else { + return Workflow.InUse.Registered; + } + } + return Workflow.InUse.Free; + }, + storeGroupNode(name, data) { + let extra = app.graph.extra; + if (!extra) app.graph.extra = extra = {}; + let groupNodes = extra.groupNodes; + if (!groupNodes) extra.groupNodes = groupNodes = {}; + groupNodes[name] = data; + }, +}; + +class GroupNodeBuilder { + constructor(nodes) { + this.nodes = nodes; + } + + build() { + const name = this.getName(); + if (!name) return; + + // Sort the nodes so they are in execution order + // this allows for widgets to be in the correct order when reconstructing + this.sortNodes(); + + this.nodeData = this.getNodeData(); + Workflow.storeGroupNode(name, this.nodeData); + + return { name, nodeData: this.nodeData }; + } + + getName() { + const name = prompt("Enter group name"); + if (!name) return; + const used = Workflow.isInUseGroupNode(name); + switch (used) { + case Workflow.InUse.InWorkflow: + alert( + "An in use group node with this name already exists embedded in this workflow, please remove any instances or use a new name." + ); + return; + case Workflow.InUse.Registered: + if ( + !confirm( + "An group node with this name already exists embedded in this workflow, are you sure you want to overwrite it?" + ) + ) { + return; + } + break; + } + return name; + } + + sortNodes() { + // Gets the builders nodes in graph execution order + const nodesInOrder = app.graph.computeExecutionOrder(false); + this.nodes = this.nodes + .map((node) => ({ index: nodesInOrder.indexOf(node), node })) + .sort((a, b) => a.index - b.index || a.node.id - b.node.id) + .map(({ node }) => node); + } + + getNodeData() { + const storeLinkTypes = (config) => { + // Store link types for dynamically typed nodes e.g. 
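/* Workflow.storeGroupNode persists the group under graph.extra, so the
   definition rides along inside the serialized workflow; a sketch of the
   stored shape (field values illustrative):

   app.graph.extra.groupNodes = {
     "my group": {
       nodes: [{ type: "CheckpointLoaderSimple" }, { type: "KSampler" }],
       // links: [srcIndex, srcSlot, dstIndex, dstSlot, originalOriginId, type]
       links: [[0, 0, 1, 0, 12, "MODEL"]],
       // external: [nodeIndex, slot, type] for outputs consumed outside the group
       external: [[1, 0, "LATENT"]],
     },
   };
*/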
reroutes + for (const link of config.links) { + const origin = app.graph.getNodeById(link[4]); + const type = origin.outputs[link[1]].type; + link.push(type); + } + }; + + const storeExternalLinks = (config) => { + // Store any external links to the group in the config so when rebuilding we add extra slots + config.external = []; + for (let i = 0; i < this.nodes.length; i++) { + const node = this.nodes[i]; + if (!node.outputs?.length) continue; + for (let slot = 0; slot < node.outputs.length; slot++) { + let hasExternal = false; + const output = node.outputs[slot]; + let type = output.type; + if (!output.links?.length) continue; + for (const l of output.links) { + const link = app.graph.links[l]; + if (!link) continue; + if (type === "*") type = link.type; + + if (!app.canvas.selected_nodes[link.target_id]) { + hasExternal = true; + break; + } + } + if (hasExternal) { + config.external.push([i, slot, type]); + } + } + } + }; + + // Use the built in copyToClipboard function to generate the node data we need + const backup = localStorage.getItem("litegrapheditor_clipboard"); + try { + app.canvas.copyToClipboard(this.nodes); + const config = JSON.parse(localStorage.getItem("litegrapheditor_clipboard")); + + storeLinkTypes(config); + storeExternalLinks(config); + + return config; + } finally { + localStorage.setItem("litegrapheditor_clipboard", backup); + } + } +} + +export class GroupNodeConfig { + constructor(name, nodeData) { + this.name = name; + this.nodeData = nodeData; + this.getLinks(); + + this.inputCount = 0; + this.oldToNewOutputMap = {}; + this.newToOldOutputMap = {}; + this.oldToNewInputMap = {}; + this.oldToNewWidgetMap = {}; + this.newToOldWidgetMap = {}; + this.primitiveDefs = {}; + this.widgetToPrimitive = {}; + this.primitiveToWidget = {}; + } + + async registerType(source = "workflow") { + this.nodeDef = { + output: [], + output_name: [], + output_is_list: [], + name: source + "/" + this.name, + display_name: this.name, + category: "group nodes" + ("/" + source), + input: { required: {} }, + + [GROUP]: this, + }; + + this.inputs = []; + const seenInputs = {}; + const seenOutputs = {}; + for (let i = 0; i < this.nodeData.nodes.length; i++) { + const node = this.nodeData.nodes[i]; + node.index = i; + this.processNode(node, seenInputs, seenOutputs); + } + await app.registerNodeDef("workflow/" + this.name, this.nodeDef); + } + + getLinks() { + this.linksFrom = {}; + this.linksTo = {}; + this.externalFrom = {}; + + // Extract links for easy lookup + for (const l of this.nodeData.links) { + const [sourceNodeId, sourceNodeSlot, targetNodeId, targetNodeSlot] = l; + + // Skip links outside the copy config + if (sourceNodeId == null) continue; + + if (!this.linksFrom[sourceNodeId]) { + this.linksFrom[sourceNodeId] = {}; + } + this.linksFrom[sourceNodeId][sourceNodeSlot] = l; + + if (!this.linksTo[targetNodeId]) { + this.linksTo[targetNodeId] = {}; + } + this.linksTo[targetNodeId][targetNodeSlot] = l; + } + + if (this.nodeData.external) { + for (const ext of this.nodeData.external) { + if (!this.externalFrom[ext[0]]) { + this.externalFrom[ext[0]] = { [ext[1]]: ext[2] }; + } else { + this.externalFrom[ext[0]][ext[1]] = ext[2]; + } + } + } + } + + processNode(node, seenInputs, seenOutputs) { + const def = this.getNodeDef(node); + if (!def) return; + + const inputs = { ...def.input?.required, ...def.input?.optional }; + + this.inputs.push(this.processNodeInputs(node, seenInputs, inputs)); + if (def.output?.length) this.processNodeOutputs(node, seenOutputs, def); + } + + getNodeDef(node) { 
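/* registerType() turns the captured nodes into an ordinary ComfyUI node
   definition; for a small pipeline the synthesized def might look like
   (input fields illustrative):

   {
     name: "workflow/my group",
     display_name: "my group",
     category: "group nodes/workflow",
     input: { required: { seed: ["INT", { control_after_generate: "control_after_generate" }] } },
     output: ["IMAGE"],
     output_name: ["IMAGE"],
     output_is_list: [false],
   }
*/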
+ const def = globalDefs[node.type]; + if (def) return def; + + const linksFrom = this.linksFrom[node.index]; + if (node.type === "PrimitiveNode") { + // Skip as its not linked + if (!linksFrom) return; + + let type = linksFrom["0"][5]; + if (type === "COMBO") { + // Use the array items + const source = node.outputs[0].widget.name; + const fromTypeName = this.nodeData.nodes[linksFrom["0"][2]].type; + const fromType = globalDefs[fromTypeName]; + const input = fromType.input.required[source] ?? fromType.input.optional[source]; + type = input[0]; + } + + const def = (this.primitiveDefs[node.index] = { + input: { + required: { + value: [type, {}], + }, + }, + output: [type], + output_name: [], + output_is_list: [], + }); + return def; + } else if (node.type === "Reroute") { + const linksTo = this.linksTo[node.index]; + if (linksTo && linksFrom && !this.externalFrom[node.index]?.[0]) { + // Being used internally + return null; + } + + let rerouteType = "*"; + if (linksFrom) { + const [, , id, slot] = linksFrom["0"]; + rerouteType = this.nodeData.nodes[id].inputs[slot].type; + } else if (linksTo) { + const [id, slot] = linksTo["0"]; + rerouteType = this.nodeData.nodes[id].outputs[slot].type; + } else { + // Reroute used as a pipe + for (const l of this.nodeData.links) { + if (l[2] === node.index) { + rerouteType = l[5]; + break; + } + } + if (rerouteType === "*") { + // Check for an external link + const t = this.externalFrom[node.index]?.[0]; + if (t) { + rerouteType = t; + } + } + } + + return { + input: { + required: { + [rerouteType]: [rerouteType, {}], + }, + }, + output: [rerouteType], + output_name: [], + output_is_list: [], + }; + } + + console.warn("Skipping virtual node " + node.type + " when building group node " + this.name); + } + + getInputConfig(node, inputName, seenInputs, config, extra) { + let name = node.inputs?.find((inp) => inp.name === inputName)?.label ?? inputName; + let prefix = ""; + // Special handling for primitive to include the title if it is set rather than just "value" + if ((node.type === "PrimitiveNode" && node.title) || name in seenInputs) { + prefix = `${node.title ?? node.type} `; + name = `${prefix}${inputName}`; + if (name in seenInputs) { + name = `${prefix}${seenInputs[name]} ${inputName}`; + } + } + seenInputs[name] = (seenInputs[name] ?? 1) + 1; + + if (inputName === "seed" || inputName === "noise_seed") { + if (!extra) extra = {}; + extra.control_after_generate = `${prefix}control_after_generate`; + } + if (config[0] === "IMAGEUPLOAD") { + if (!extra) extra = {}; + extra.widget = `${prefix}${config[1]?.widget ?? 
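/* Virtual nodes get synthetic defs: a linked primitive collapses to a single
   "value" input of the resolved type, and a reroute becomes a typed
   pass-through, e.g. for a latent flowing through it:

   { input: { required: { LATENT: ["LATENT", {}] } },
     output: ["LATENT"], output_name: [], output_is_list: [] }
*/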
"image"}`; + } + + if (extra) { + config = [config[0], { ...config[1], ...extra }]; + } + + return { name, config }; + } + + processWidgetInputs(inputs, node, inputNames, seenInputs) { + const slots = []; + const converted = new Map(); + const widgetMap = (this.oldToNewWidgetMap[node.index] = {}); + for (const inputName of inputNames) { + let widgetType = getWidgetType(inputs[inputName], inputName); + if (widgetType) { + const convertedIndex = node.inputs?.findIndex( + (inp) => inp.name === inputName && inp.widget?.name === inputName + ); + if (convertedIndex > -1) { + // This widget has been converted to a widget + // We need to store this in the correct position so link ids line up + converted.set(convertedIndex, inputName); + widgetMap[inputName] = null; + } else { + // Normal widget + const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]); + this.nodeDef.input.required[name] = config; + widgetMap[inputName] = name; + this.newToOldWidgetMap[name] = { node, inputName }; + } + } else { + // Normal input + slots.push(inputName); + } + } + return { converted, slots }; + } + + checkPrimitiveConnection(link, inputName, inputs) { + const sourceNode = this.nodeData.nodes[link[0]]; + if (sourceNode.type === "PrimitiveNode") { + // Merge link configurations + const [sourceNodeId, _, targetNodeId, __] = link; + const primitiveDef = this.primitiveDefs[sourceNodeId]; + const targetWidget = inputs[inputName]; + const primitiveConfig = primitiveDef.input.required.value; + const output = { widget: primitiveConfig }; + const config = mergeIfValid(output, targetWidget, false, null, primitiveConfig); + primitiveConfig[1] = config?.customConfig ?? inputs[inputName][1] ? { ...inputs[inputName][1] } : {}; + + let name = this.oldToNewWidgetMap[sourceNodeId]["value"]; + name = name.substr(0, name.length - 6); + primitiveConfig[1].control_after_generate = true; + primitiveConfig[1].control_prefix = name; + + let toPrimitive = this.widgetToPrimitive[targetNodeId]; + if (!toPrimitive) { + toPrimitive = this.widgetToPrimitive[targetNodeId] = {}; + } + if (toPrimitive[inputName]) { + toPrimitive[inputName].push(sourceNodeId); + } + toPrimitive[inputName] = sourceNodeId; + + let toWidget = this.primitiveToWidget[sourceNodeId]; + if (!toWidget) { + toWidget = this.primitiveToWidget[sourceNodeId] = []; + } + toWidget.push({ nodeId: targetNodeId, inputName }); + } + } + + processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs) { + for (let i = 0; i < slots.length; i++) { + const inputName = slots[i]; + if (linksTo[i]) { + this.checkPrimitiveConnection(linksTo[i], inputName, inputs); + // This input is linked so we can skip it + continue; + } + + const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]); + this.nodeDef.input.required[name] = config; + inputMap[i] = this.inputCount++; + } + } + + processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs) { + // Add converted widgets sorted into their index order (ordered as they were converted) so link ids match up + const convertedSlots = [...converted.keys()].sort().map((k) => converted.get(k)); + for (let i = 0; i < convertedSlots.length; i++) { + const inputName = convertedSlots[i]; + if (linksTo[slots.length + i]) { + this.checkPrimitiveConnection(linksTo[slots.length + i], inputName, inputs); + // This input is linked so we can skip it + continue; + } + + const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName], { + 
defaultInput: true, + }); + this.nodeDef.input.required[name] = config; + inputMap[slots.length + i] = this.inputCount++; + } + } + + processNodeInputs(node, seenInputs, inputs) { + const inputMapping = []; + + const inputNames = Object.keys(inputs); + if (!inputNames.length) return; + + const { converted, slots } = this.processWidgetInputs(inputs, node, inputNames, seenInputs); + const linksTo = this.linksTo[node.index] ?? {}; + const inputMap = (this.oldToNewInputMap[node.index] = {}); + this.processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs); + this.processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs); + + return inputMapping; + } + + processNodeOutputs(node, seenOutputs, def) { + const oldToNew = (this.oldToNewOutputMap[node.index] = {}); + + // Add outputs + for (let outputId = 0; outputId < def.output.length; outputId++) { + const linksFrom = this.linksFrom[node.index]; + if (linksFrom?.[outputId] && !this.externalFrom[node.index]?.[outputId]) { + // This output is linked internally so we can skip it + continue; + } + + oldToNew[outputId] = this.nodeDef.output.length; + this.newToOldOutputMap[this.nodeDef.output.length] = { node, slot: outputId }; + this.nodeDef.output.push(def.output[outputId]); + this.nodeDef.output_is_list.push(def.output_is_list[outputId]); + + let label = def.output_name?.[outputId] ?? def.output[outputId]; + const output = node.outputs.find((o) => o.name === label); + if (output?.label) { + label = output.label; + } + let name = label; + if (name in seenOutputs) { + const prefix = `${node.title ?? node.type} `; + name = `${prefix}${label}`; + if (name in seenOutputs) { + name = `${prefix}${node.index} ${label}`; + } + } + seenOutputs[name] = 1; + + this.nodeDef.output_name.push(name); + } + } + + static async registerFromWorkflow(groupNodes, missingNodeTypes) { + for (const g in groupNodes) { + const groupData = groupNodes[g]; + + let hasMissing = false; + for (const n of groupData.nodes) { + // Find missing node types + if (!(n.type in LiteGraph.registered_node_types)) { + missingNodeTypes.push(n.type); + hasMissing = true; + } + } + + if (hasMissing) continue; + + const config = new GroupNodeConfig(g, groupData); + await config.registerType(); + } + } +} + +export class GroupNodeHandler { + node; + groupData; + + constructor(node) { + this.node = node; + this.groupData = node.constructor?.nodeData?.[GROUP]; + + this.node.setInnerNodes = (innerNodes) => { + this.innerNodes = innerNodes; + + for (let innerNodeIndex = 0; innerNodeIndex < this.innerNodes.length; innerNodeIndex++) { + const innerNode = this.innerNodes[innerNodeIndex]; + + for (const w of innerNode.widgets ?? 
[]) { + if (w.type === "converted-widget") { + w.serializeValue = w.origSerializeValue; + } + } + + innerNode.index = innerNodeIndex; + innerNode.getInputNode = (slot) => { + // Check if this input is internal or external + const externalSlot = this.groupData.oldToNewInputMap[innerNode.index]?.[slot]; + if (externalSlot != null) { + return this.node.getInputNode(externalSlot); + } + + // Internal link + const innerLink = this.groupData.linksTo[innerNode.index]?.[slot]; + if (!innerLink) return null; + + const inputNode = innerNodes[innerLink[0]]; + // Primitives will already apply their values + if (inputNode.type === "PrimitiveNode") return null; + + return inputNode; + }; + + innerNode.getInputLink = (slot) => { + const externalSlot = this.groupData.oldToNewInputMap[innerNode.index]?.[slot]; + if (externalSlot != null) { + // The inner node is connected via the group node inputs + const linkId = this.node.inputs[externalSlot].link; + let link = app.graph.links[linkId]; + + // Use the outer link, but update the target to the inner node + link = { + ...link, + target_id: innerNode.id, + target_slot: +slot, + }; + return link; + } + + let link = this.groupData.linksTo[innerNode.index]?.[slot]; + if (!link) return null; + // Use the inner link, but update the origin node to be inner node id + link = { + origin_id: innerNodes[link[0]].id, + origin_slot: link[1], + target_id: innerNode.id, + target_slot: +slot, + }; + return link; + }; + } + }; + + this.node.updateLink = (link) => { + // Replace the group node reference with the internal node + link = { ...link }; + const output = this.groupData.newToOldOutputMap[link.origin_slot]; + let innerNode = this.innerNodes[output.node.index]; + let l; + while (innerNode.type === "Reroute") { + l = innerNode.getInputLink(0); + innerNode = innerNode.getInputNode(0); + } + + link.origin_id = innerNode.id; + link.origin_slot = l?.origin_slot ?? output.slot; + return link; + }; + + this.node.getInnerNodes = () => { + if (!this.innerNodes) { + this.node.setInnerNodes( + this.groupData.nodeData.nodes.map((n, i) => { + const innerNode = LiteGraph.createNode(n.type); + innerNode.configure(n); + innerNode.id = `${this.node.id}:${i}`; + return innerNode; + }) + ); + } + + this.updateInnerWidgets(); + + return this.innerNodes; + }; + + this.node.convertToNodes = () => { + const addInnerNodes = () => { + const backup = localStorage.getItem("litegrapheditor_clipboard"); + // Clone the node data so we dont mutate it for other nodes + const c = { ...this.groupData.nodeData }; + c.nodes = [...c.nodes]; + const innerNodes = this.node.getInnerNodes(); + let ids = []; + for (let i = 0; i < c.nodes.length; i++) { + let id = innerNodes?.[i]?.id; + // Use existing IDs if they are set on the inner nodes + if (id == null || isNaN(id)) { + id = undefined; + } else { + ids.push(id); + } + c.nodes[i] = { ...c.nodes[i], id }; + } + localStorage.setItem("litegrapheditor_clipboard", JSON.stringify(c)); + app.canvas.pasteFromClipboard(); + localStorage.setItem("litegrapheditor_clipboard", backup); + + const [x, y] = this.node.pos; + let top; + let left; + // Configure nodes with current widget data + const selectedIds = ids.length ? 
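/* Inner nodes receive composite ids so execution progress can be routed back
   to the wrapping group node; a sketch:

   const inner = LiteGraph.createNode("KSampler");
   inner.id = `${groupNode.id}:3`;              // e.g. group node 7 -> "7:3"
   // later, handleEvent matches with a loose compare:
   const idx = innerNodes.findIndex((n) => n.id == "7:3");
*/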
ids : Object.keys(app.canvas.selected_nodes); + const newNodes = []; + for (let i = 0; i < selectedIds.length; i++) { + const id = selectedIds[i]; + const newNode = app.graph.getNodeById(id); + const innerNode = innerNodes[i]; + newNodes.push(newNode); + + if (left == null || newNode.pos[0] < left) { + left = newNode.pos[0]; + } + if (top == null || newNode.pos[1] < top) { + top = newNode.pos[1]; + } + + const map = this.groupData.oldToNewWidgetMap[innerNode.index]; + if (map) { + const widgets = Object.keys(map); + + for (const oldName of widgets) { + const newName = map[oldName]; + if (!newName) continue; + + const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName); + if (widgetIndex === -1) continue; + + // Populate the main and any linked widgets + if (innerNode.type === "PrimitiveNode") { + for (let i = 0; i < newNode.widgets.length; i++) { + newNode.widgets[i].value = this.node.widgets[widgetIndex + i].value; + } + } else { + const outerWidget = this.node.widgets[widgetIndex]; + const newWidget = newNode.widgets.find((w) => w.name === oldName); + if (!newWidget) continue; + + newWidget.value = outerWidget.value; + for (let w = 0; w < outerWidget.linkedWidgets?.length; w++) { + newWidget.linkedWidgets[w].value = outerWidget.linkedWidgets[w].value; + } + } + } + } + } + + // Shift each node + for (const newNode of newNodes) { + newNode.pos = [newNode.pos[0] - (left - x), newNode.pos[1] - (top - y)]; + } + + return { newNodes, selectedIds }; + }; + + const reconnectInputs = (selectedIds) => { + for (const innerNodeIndex in this.groupData.oldToNewInputMap) { + const id = selectedIds[innerNodeIndex]; + const newNode = app.graph.getNodeById(id); + const map = this.groupData.oldToNewInputMap[innerNodeIndex]; + for (const innerInputId in map) { + const groupSlotId = map[innerInputId]; + if (groupSlotId == null) continue; + const slot = node.inputs[groupSlotId]; + if (slot.link == null) continue; + const link = app.graph.links[slot.link]; + // connect this node output to the input of another node + const originNode = app.graph.getNodeById(link.origin_id); + originNode.connect(link.origin_slot, newNode, +innerInputId); + } + } + }; + + const reconnectOutputs = () => { + for (let groupOutputId = 0; groupOutputId < node.outputs?.length; groupOutputId++) { + const output = node.outputs[groupOutputId]; + if (!output.links) continue; + const links = [...output.links]; + for (const l of links) { + const slot = this.groupData.newToOldOutputMap[groupOutputId]; + const link = app.graph.links[l]; + const targetNode = app.graph.getNodeById(link.target_id); + const newNode = app.graph.getNodeById(selectedIds[slot.node.index]); + newNode.connect(slot.slot, targetNode, link.target_slot); + } + } + }; + + const { newNodes, selectedIds } = addInnerNodes(); + reconnectInputs(selectedIds); + reconnectOutputs(selectedIds); + app.graph.remove(this.node); + + return newNodes; + }; + + const getExtraMenuOptions = this.node.getExtraMenuOptions; + this.node.getExtraMenuOptions = function (_, options) { + getExtraMenuOptions?.apply(this, arguments); + + let optionIndex = options.findIndex((o) => o.content === "Outputs"); + if (optionIndex === -1) optionIndex = options.length; + else optionIndex++; + options.splice(optionIndex, 0, null, { + content: "Convert to nodes", + callback: () => { + return this.convertToNodes(); + }, + }); + }; + + // Draw custom collapse icon to identity this as a group + const onDrawTitleBox = this.node.onDrawTitleBox; + this.node.onDrawTitleBox = function (ctx, height, 
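/* Both conversion directions piggyback on the litegraph clipboard while
   preserving whatever the user had copied; the shared pattern:

   const backup = localStorage.getItem("litegrapheditor_clipboard");
   try {
     localStorage.setItem("litegrapheditor_clipboard", JSON.stringify(config));
     app.canvas.pasteFromClipboard();
   } finally {
     localStorage.setItem("litegrapheditor_clipboard", backup);
   }
*/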
size, scale) { + onDrawTitleBox?.apply(this, arguments); + + const fill = ctx.fillStyle; + ctx.beginPath(); + ctx.rect(11, -height + 11, 2, 2); + ctx.rect(14, -height + 11, 2, 2); + ctx.rect(17, -height + 11, 2, 2); + ctx.rect(11, -height + 14, 2, 2); + ctx.rect(14, -height + 14, 2, 2); + ctx.rect(17, -height + 14, 2, 2); + ctx.rect(11, -height + 17, 2, 2); + ctx.rect(14, -height + 17, 2, 2); + ctx.rect(17, -height + 17, 2, 2); + + ctx.fillStyle = this.boxcolor || LiteGraph.NODE_DEFAULT_BOXCOLOR; + ctx.fill(); + ctx.fillStyle = fill; + }; + + // Draw progress label + const onDrawForeground = node.onDrawForeground; + const groupData = this.groupData.nodeData; + node.onDrawForeground = function (ctx) { + const r = onDrawForeground?.apply?.(this, arguments); + if (+app.runningNodeId === this.id && this.runningInternalNodeId !== null) { + const n = groupData.nodes[this.runningInternalNodeId]; + const message = `Running ${n.title || n.type} (${this.runningInternalNodeId}/${groupData.nodes.length})`; + ctx.save(); + ctx.font = "12px sans-serif"; + const sz = ctx.measureText(message); + ctx.fillStyle = node.boxcolor || LiteGraph.NODE_DEFAULT_BOXCOLOR; + ctx.beginPath(); + ctx.roundRect(0, -LiteGraph.NODE_TITLE_HEIGHT - 20, sz.width + 12, 20, 5); + ctx.fill(); + + ctx.fillStyle = "#fff"; + ctx.fillText(message, 6, -LiteGraph.NODE_TITLE_HEIGHT - 6); + ctx.restore(); + } + }; + + // Flag this node as needing to be reset + const onExecutionStart = this.node.onExecutionStart; + this.node.onExecutionStart = function () { + this.resetExecution = true; + return onExecutionStart?.apply(this, arguments); + }; + + function handleEvent(type, getId, getEvent) { + const handler = ({ detail }) => { + const id = getId(detail); + if (!id) return; + const node = app.graph.getNodeById(id); + if (node) return; + + const innerNodeIndex = this.innerNodes?.findIndex((n) => n.id == id); + if (innerNodeIndex > -1) { + this.node.runningInternalNodeId = innerNodeIndex; + api.dispatchEvent(new CustomEvent(type, { detail: getEvent(detail, this.node.id + "", this.node) })); + } + }; + api.addEventListener(type, handler); + return handler; + } + + const executing = handleEvent.call( + this, + "executing", + (d) => d, + (d, id, node) => id + ); + + const executed = handleEvent.call( + this, + "executed", + (d) => d?.node, + (d, id, node) => ({ ...d, node: id, merge: !node.resetExecution }) + ); + + const onRemoved = node.onRemoved; + this.node.onRemoved = function () { + onRemoved?.apply(this, arguments); + api.removeEventListener("executing", executing); + api.removeEventListener("executed", executed); + }; + } + + updateInnerWidgets() { + for (const newWidgetName in this.groupData.newToOldWidgetMap) { + const newWidget = this.node.widgets.find((w) => w.name === newWidgetName); + if (!newWidget) continue; + + const newValue = newWidget.value; + const old = this.groupData.newToOldWidgetMap[newWidgetName]; + let innerNode = this.innerNodes[old.node.index]; + + if (innerNode.type === "PrimitiveNode") { + innerNode.primitiveValue = newValue; + const primitiveLinked = this.groupData.primitiveToWidget[old.node.index]; + for (const linked of primitiveLinked) { + const node = this.innerNodes[linked.nodeId]; + const widget = node.widgets.find((w) => w.name === linked.inputName); + + if (widget) { + widget.value = newValue; + } + } + continue; + } + + const widget = innerNode.widgets?.find((w) => w.name === old.inputName); + if (widget) { + widget.value = newValue; + } + } + } + + populatePrimitive(node, nodeId, oldName, i, linkedShift) 
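/* The executing/executed handlers re-target events whose id belongs to an
   inner node ("7:3") onto the group node itself; conceptually:

   api.addEventListener("executing", ({ detail }) => {
     if (app.graph.getNodeById(detail)) return;   // a real graph node, not an inner one
     const idx = innerNodes.findIndex((n) => n.id == detail);
     if (idx > -1) {
       groupNode.runningInternalNodeId = idx;
       api.dispatchEvent(new CustomEvent("executing", { detail: groupNode.id + "" }));
     }
   });
*/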
{ + // Converted widget, populate primitive if linked + const primitiveId = this.groupData.widgetToPrimitive[nodeId]?.[oldName]; + if (primitiveId == null) return; + const targetWidgetName = this.groupData.oldToNewWidgetMap[primitiveId]["value"]; + const targetWidgetIndex = this.node.widgets.findIndex((w) => w.name === targetWidgetName); + if (targetWidgetIndex > -1) { + const primitiveNode = this.innerNodes[primitiveId]; + let len = primitiveNode.widgets.length; + if (len - 1 !== this.node.widgets[targetWidgetIndex].linkedWidgets?.length) { + // Fallback handling for if some reason the primitive has a different number of widgets + // we dont want to overwrite random widgets, better to leave blank + len = 1; + } + for (let i = 0; i < len; i++) { + this.node.widgets[targetWidgetIndex + i].value = primitiveNode.widgets[i].value; + } + } + } + + populateWidgets() { + for (let nodeId = 0; nodeId < this.groupData.nodeData.nodes.length; nodeId++) { + const node = this.groupData.nodeData.nodes[nodeId]; + + if (!node.widgets_values?.length) continue; + + const map = this.groupData.oldToNewWidgetMap[nodeId]; + const widgets = Object.keys(map); + + let linkedShift = 0; + for (let i = 0; i < widgets.length; i++) { + const oldName = widgets[i]; + const newName = map[oldName]; + const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName); + const mainWidget = this.node.widgets[widgetIndex]; + if (!newName) { + // New name will be null if its a converted widget + this.populatePrimitive(node, nodeId, oldName, i, linkedShift); + + // Find the inner widget and shift by the number of linked widgets as they will have been removed too + const innerWidget = this.innerNodes[nodeId].widgets?.find((w) => w.name === oldName); + linkedShift += innerWidget.linkedWidgets?.length ?? 0; + continue; + } + + if (widgetIndex === -1) { + continue; + } + + // Populate the main and any linked widget + mainWidget.value = node.widgets_values[i + linkedShift]; + for (let w = 0; w < mainWidget.linkedWidgets?.length; w++) { + this.node.widgets[widgetIndex + w + 1].value = node.widgets_values[i + ++linkedShift]; + } + } + } + } + + replaceNodes(nodes) { + let top; + let left; + + for (let i = 0; i < nodes.length; i++) { + const node = nodes[i]; + if (left == null || node.pos[0] < left) { + left = node.pos[0]; + } + if (top == null || node.pos[1] < top) { + top = node.pos[1]; + } + + this.linkOutputs(node, i); + app.graph.remove(node); + } + + this.linkInputs(); + this.node.pos = [left, top]; + } + + linkOutputs(originalNode, nodeId) { + if (!originalNode.outputs) return; + + for (const output of originalNode.outputs) { + if (!output.links) continue; + // Clone the links as they'll be changed if we reconnect + const links = [...output.links]; + for (const l of links) { + const link = app.graph.links[l]; + if (!link) continue; + + const targetNode = app.graph.getNodeById(link.target_id); + const newSlot = this.groupData.oldToNewOutputMap[nodeId]?.[link.origin_slot]; + if (newSlot != null) { + this.node.connect(newSlot, targetNode, link.target_slot); + } + } + } + } + + linkInputs() { + for (const link of this.groupData.nodeData.links ?? 
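/* populateWidgets has to line serialized widgets_values up with the group's
   flattened widget list; linkedShift skips the extra entries that linked
   widgets such as control_after_generate contribute. A sketch for a KSampler
   member (widget order assumed):

   // widgets_values: [1234, "randomize", 20, 8.0, "euler", "normal", 1.0]
   //                  seed  control      steps cfg  sampler scheduler denoise
*/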
[]) { + const [, originSlot, targetId, targetSlot, actualOriginId] = link; + const originNode = app.graph.getNodeById(actualOriginId); + if (!originNode) continue; // this node is in the group + originNode.connect(originSlot, this.node.id, this.groupData.oldToNewInputMap[targetId][targetSlot]); + } + } + + static getGroupData(node) { + return node.constructor?.nodeData?.[GROUP]; + } + + static isGroupNode(node) { + return !!node.constructor?.nodeData?.[GROUP]; + } + + static async fromNodes(nodes) { + // Process the nodes into the stored workflow group node data + const builder = new GroupNodeBuilder(nodes); + const res = builder.build(); + if (!res) return; + + const { name, nodeData } = res; + + // Convert this data into a LG node definition and register it + const config = new GroupNodeConfig(name, nodeData); + await config.registerType(); + + const groupNode = LiteGraph.createNode(`workflow/${name}`); + // Reuse the existing nodes for this instance + groupNode.setInnerNodes(builder.nodes); + groupNode[GROUP].populateWidgets(); + app.graph.add(groupNode); + + // Remove all converted nodes and relink them + groupNode[GROUP].replaceNodes(builder.nodes); + return groupNode; + } +} + +function addConvertToGroupOptions() { + function addOption(options, index) { + const selected = Object.values(app.canvas.selected_nodes ?? {}); + const disabled = selected.length < 2 || selected.find((n) => GroupNodeHandler.isGroupNode(n)); + options.splice(index + 1, null, { + content: `Convert to Group Node`, + disabled, + callback: async () => { + return await GroupNodeHandler.fromNodes(selected); + }, + }); + } + + // Add to canvas + const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions; + LGraphCanvas.prototype.getCanvasMenuOptions = function () { + const options = getCanvasMenuOptions.apply(this, arguments); + const index = options.findIndex((o) => o?.content === "Add Group") + 1 || opts.length; + addOption(options, index); + return options; + }; + + // Add to nodes + const getNodeMenuOptions = LGraphCanvas.prototype.getNodeMenuOptions; + LGraphCanvas.prototype.getNodeMenuOptions = function (node) { + const options = getNodeMenuOptions.apply(this, arguments); + if (!GroupNodeHandler.isGroupNode(node)) { + const index = options.findIndex((o) => o?.content === "Outputs") + 1 || opts.length - 1; + addOption(options, index); + } + return options; + }; +} + +const id = "Comfy.GroupNode"; +let globalDefs; +const ext = { + name: id, + setup() { + addConvertToGroupOptions(); + }, + async beforeConfigureGraph(graphData, missingNodeTypes) { + const nodes = graphData?.extra?.groupNodes; + if (nodes) { + await GroupNodeConfig.registerFromWorkflow(nodes, missingNodeTypes); + } + }, + addCustomNodeDefs(defs) { + // Store this so we can mutate it later with group nodes + globalDefs = defs; + }, + nodeCreated(node) { + if (GroupNodeHandler.isGroupNode(node)) { + node[GROUP] = new GroupNodeHandler(node); + } + }, +}; + +app.registerExtension(ext); diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index b6479f454da..2d4821742d1 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -1,5 +1,6 @@ import { app } from "../../scripts/app.js"; import { ComfyDialog, $el } from "../../scripts/ui.js"; +import { GroupNodeConfig, GroupNodeHandler } from "./groupNode.js"; // Adds the ability to save and add multiple nodes as a template // To save: @@ -34,7 +35,7 @@ class ManageTemplates extends ComfyDialog { type: "file", accept: 
".json", multiple: true, - style: {display: "none"}, + style: { display: "none" }, parent: document.body, onchange: () => this.importAll(), }); @@ -109,13 +110,13 @@ class ManageTemplates extends ComfyDialog { return; } - const json = JSON.stringify({templates: this.templates}, null, 2); // convert the data to a JSON string - const blob = new Blob([json], {type: "application/json"}); + const json = JSON.stringify({ templates: this.templates }, null, 2); // convert the data to a JSON string + const blob = new Blob([json], { type: "application/json" }); const url = URL.createObjectURL(blob); const a = $el("a", { href: url, download: "node_templates.json", - style: {display: "none"}, + style: { display: "none" }, parent: document.body, }); a.click(); @@ -291,11 +292,11 @@ app.registerExtension({ setup() { const manage = new ManageTemplates(); - const clipboardAction = (cb) => { + const clipboardAction = async (cb) => { // We use the clipboard functions but dont want to overwrite the current user clipboard // Restore it after we've run our callback const old = localStorage.getItem("litegrapheditor_clipboard"); - cb(); + await cb(); localStorage.setItem("litegrapheditor_clipboard", old); }; @@ -309,13 +310,31 @@ app.registerExtension({ disabled: !Object.keys(app.canvas.selected_nodes || {}).length, callback: () => { const name = prompt("Enter name"); - if (!name || !name.trim()) return; + if (!name?.trim()) return; clipboardAction(() => { app.canvas.copyToClipboard(); + let data = localStorage.getItem("litegrapheditor_clipboard"); + data = JSON.parse(data); + const nodeIds = Object.keys(app.canvas.selected_nodes); + for (let i = 0; i < nodeIds.length; i++) { + const node = app.graph.getNodeById(nodeIds[i]); + const nodeData = node?.constructor.nodeData; + + let groupData = GroupNodeHandler.getGroupData(node); + if (groupData) { + groupData = groupData.nodeData; + if (!data.groupNodes) { + data.groupNodes = {}; + } + data.groupNodes[nodeData.name] = groupData; + data.nodes[i].type = nodeData.name; + } + } + manage.templates.push({ name, - data: localStorage.getItem("litegrapheditor_clipboard"), + data: JSON.stringify(data), }); manage.store(); }); @@ -323,15 +342,19 @@ app.registerExtension({ }); // Map each template to a menu item - const subItems = manage.templates.map((t) => ({ - content: t.name, - callback: () => { - clipboardAction(() => { - localStorage.setItem("litegrapheditor_clipboard", t.data); - app.canvas.pasteFromClipboard(); - }); - }, - })); + const subItems = manage.templates.map((t) => { + return { + content: t.name, + callback: () => { + clipboardAction(async () => { + const data = JSON.parse(t.data); + await GroupNodeConfig.registerFromWorkflow(data.groupNodes, {}); + localStorage.setItem("litegrapheditor_clipboard", t.data); + app.canvas.pasteFromClipboard(); + }); + }, + }; + }); subItems.push(null, { content: "Manage", diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 5c8fbc9b2d3..b6fa411f7e1 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -121,6 +121,110 @@ function isValidCombo(combo, obj) { return true; } +export function mergeIfValid(output, config2, forceUpdate, recreateWidget, config1) { + if (!config1) { + config1 = output.widget[CONFIG] ?? 
output.widget[GET_CONFIG](); + } + + if (config1[0] instanceof Array) { + if (!isValidCombo(config1[0], config2[0])) return false; + } else if (config1[0] !== config2[0]) { + // Types dont match + console.log(`connection rejected: types dont match`, config1[0], config2[0]); + return false; + } + + const keys = new Set([...Object.keys(config1[1] ?? {}), ...Object.keys(config2[1] ?? {})]); + + let customConfig; + const getCustomConfig = () => { + if (!customConfig) { + if (typeof structuredClone === "undefined") { + customConfig = JSON.parse(JSON.stringify(config1[1] ?? {})); + } else { + customConfig = structuredClone(config1[1] ?? {}); + } + } + return customConfig; + }; + + const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; + for (const k of keys.values()) { + if (k !== "default" && k !== "forceInput" && k !== "defaultInput") { + let v1 = config1[1][k]; + let v2 = config2[1]?.[k]; + + if (v1 === v2 || (!v1 && !v2)) continue; + + if (isNumber) { + if (k === "min") { + const theirMax = config2[1]?.["max"]; + if (theirMax != null && v1 > theirMax) { + console.log("connection rejected: min > max", v1, theirMax); + return false; + } + getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.max(v1, v2); + continue; + } else if (k === "max") { + const theirMin = config2[1]?.["min"]; + if (theirMin != null && v1 < theirMin) { + console.log("connection rejected: max < min", v1, theirMin); + return false; + } + getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.min(v1, v2); + continue; + } else if (k === "step") { + let step; + if (v1 == null) { + // No current step + step = v2; + } else if (v2 == null) { + // No new step + step = v1; + } else { + if (v1 < v2) { + // Ensure v1 is larger for the mod + const a = v2; + v2 = v1; + v1 = a; + } + if (v1 % v2) { + console.log("connection rejected: steps not divisible", "current:", v1, "new:", v2); + return false; + } + + step = v1; + } + + getCustomConfig()[k] = step; + continue; + } + } + + console.log(`connection rejected: config ${k} values dont match`, v1, v2); + return false; + } + } + + if (customConfig || forceUpdate) { + if (customConfig) { + output.widget[CONFIG] = [config1[0], customConfig]; + } + + const widget = recreateWidget?.call(this); + // When deleting a node this can be null + if (widget) { + const min = widget.options.min; + const max = widget.options.max; + if (min != null && widget.value < min) widget.value = min; + if (max != null && widget.value > max) widget.value = max; + widget.callback(widget.value); + } + } + + return { customConfig }; +} + app.registerExtension({ name: "Comfy.WidgetInputs", async beforeRegisterNodeDef(nodeType, nodeData, app) { @@ -308,7 +412,7 @@ app.registerExtension({ this.isVirtualNode = true; } - applyToGraph() { + applyToGraph(extraLinks = []) { if (!this.outputs[0].links?.length) return; function get_links(node) { @@ -325,10 +429,9 @@ app.registerExtension({ return links; } - let links = get_links(this); + let links = [...get_links(this).map((l) => app.graph.links[l]), ...extraLinks]; // For each output link copy our value over the original widget value - for (const l of links) { - const linkInfo = app.graph.links[l]; + for (const linkInfo of links) { const node = this.graph.getNodeById(linkInfo.target_id); const input = node.inputs[linkInfo.target_slot]; const widgetName = input.widget.name; @@ -405,7 +508,12 @@ app.registerExtension({ } if (this.outputs[slot].links?.length) { - return this.#isValidConnection(input); + const valid = this.#isValidConnection(input); + 
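/* The merge rules keep the tightest numeric range and the larger compatible
   step; worked through for two INT configs:

   // config1: ["INT", { min: 0,  max: 100, step: 10 }]
   // config2: ["INT", { min: 20, max: 200, step: 5  }]
   // -> { customConfig: { min: 20, max: 100, step: 10 } }
   //    (10 % 5 === 0, so 10 is kept; indivisible steps reject the link)
*/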
if (valid) { + // On connect of additional outputs, copy our value to their widget + this.applyToGraph([{ target_id: target_node.id, target_slot }]); + } + return valid; } } @@ -462,12 +570,12 @@ app.registerExtension({ } } - if (widget.type === "number" || widget.type === "combo") { + if (!inputData?.[1]?.control_after_generate && (widget.type === "number" || widget.type === "combo")) { let control_value = this.widgets_values?.[1]; if (!control_value) { control_value = "fixed"; } - addValueControlWidgets(this, widget, control_value); + addValueControlWidgets(this, widget, control_value, undefined, inputData); let filter = this.widgets_values?.[2]; if(filter && this.widgets.length === 3) { this.widgets[2].value = filter; @@ -507,6 +615,7 @@ app.registerExtension({ this.#removeWidgets(); this.#onFirstConnection(true); for (let i = 0; i < this.widgets?.length; i++) this.widgets[i].value = values[i]; + return this.widgets[0]; } #mergeWidgetConfig() { @@ -547,108 +656,8 @@ app.registerExtension({ #isValidConnection(input, forceUpdate) { // Only allow connections where the configs match const output = this.outputs[0]; - const config1 = output.widget[CONFIG] ?? output.widget[GET_CONFIG](); const config2 = input.widget[GET_CONFIG](); - - if (config1[0] instanceof Array) { - if (!isValidCombo(config1[0], config2[0])) return false; - } else if (config1[0] !== config2[0]) { - // Types dont match - console.log(`connection rejected: types dont match`, config1[0], config2[0]); - return false; - } - - const keys = new Set([...Object.keys(config1[1] ?? {}), ...Object.keys(config2[1] ?? {})]); - - let customConfig; - const getCustomConfig = () => { - if (!customConfig) { - if (typeof structuredClone === "undefined") { - customConfig = JSON.parse(JSON.stringify(config1[1] ?? {})); - } else { - customConfig = structuredClone(config1[1] ?? {}); - } - } - return customConfig; - }; - - const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; - for (const k of keys.values()) { - if (k !== "default" && k !== "forceInput" && k !== "defaultInput") { - let v1 = config1[1][k]; - let v2 = config2[1][k]; - - if (v1 === v2 || (!v1 && !v2)) continue; - - if (isNumber) { - if (k === "min") { - const theirMax = config2[1]["max"]; - if (theirMax != null && v1 > theirMax) { - console.log("connection rejected: min > max", v1, theirMax); - return false; - } - getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? v1 : Math.max(v1, v2); - continue; - } else if (k === "max") { - const theirMin = config2[1]["min"]; - if (theirMin != null && v1 < theirMin) { - console.log("connection rejected: max < min", v1, theirMin); - return false; - } - getCustomConfig()[k] = v1 == null ? v2 : v2 == null ? 
v1 : Math.min(v1, v2); - continue; - } else if (k === "step") { - let step; - if (v1 == null) { - // No current step - step = v2; - } else if (v2 == null) { - // No new step - step = v1; - } else { - if (v1 < v2) { - // Ensure v1 is larger for the mod - const a = v2; - v2 = v1; - v1 = a; - } - if (v1 % v2) { - console.log("connection rejected: steps not divisible", "current:", v1, "new:", v2); - return false; - } - - step = v1; - } - - getCustomConfig()[k] = step; - continue; - } - } - - console.log(`connection rejected: config ${k} values dont match`, v1, v2); - return false; - } - } - - if (customConfig || forceUpdate) { - if (customConfig) { - output.widget[CONFIG] = [config1[0], customConfig]; - } - - this.#recreateWidget(); - - const widget = this.widgets[0]; - // When deleting a node this can be null - if (widget) { - const min = widget.options.min; - const max = widget.options.max; - if (min != null && widget.value < min) widget.value = min; - if (max != null && widget.value > max) widget.value = max; - widget.callback(widget.value); - } - } - - return true; + return !!mergeIfValid.call(this, output, config2, forceUpdate, this.#recreateWidget); } #removeWidgets() { diff --git a/web/scripts/app.js b/web/scripts/app.js index cd20c40fd0a..e9cfb277dd4 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1,5 +1,5 @@ import { ComfyLogging } from "./logging.js"; -import { ComfyWidgets } from "./widgets.js"; +import { ComfyWidgets, getWidgetType } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; @@ -779,7 +779,7 @@ export class ComfyApp { * Adds a handler on paste that extracts and loads images or workflows from pasted JSON data */ #addPasteHandler() { - document.addEventListener("paste", (e) => { + document.addEventListener("paste", async (e) => { // ctrl+shift+v is used to paste nodes with connections // this is handled by litegraph if(this.shiftDown) return; @@ -827,7 +827,7 @@ export class ComfyApp { } if (workflow && workflow.version && workflow.nodes && workflow.extra) { - this.loadGraphData(workflow); + await this.loadGraphData(workflow); } else { if (e.target.type === "text" || e.target.type === "textarea") { @@ -1177,7 +1177,19 @@ export class ComfyApp { }); api.addEventListener("executed", ({ detail }) => { - this.nodeOutputs[detail.node] = detail.output; + const output = this.nodeOutputs[detail.node]; + if (detail.merge && output) { + for (const k in detail.output ?? 
{}) { + const v = output[k]; + if (v instanceof Array) { + output[k] = v.concat(detail.output[k]); + } else { + output[k] = detail.output[k]; + } + } + } else { + this.nodeOutputs[detail.node] = detail.output; + } const node = this.graph.getNodeById(detail.node); if (node) { if (node.onExecuted) @@ -1292,6 +1304,7 @@ export class ComfyApp { this.#addProcessMouseHandler(); this.#addProcessKeyHandler(); this.#addConfigureHandler(); + this.#addApiUpdateHandlers(); this.graph = new LGraph(); @@ -1328,7 +1341,7 @@ export class ComfyApp { const json = localStorage.getItem("workflow"); if (json) { const workflow = JSON.parse(json); - this.loadGraphData(workflow); + await this.loadGraphData(workflow); restored = true; } } catch (err) { @@ -1337,7 +1350,7 @@ export class ComfyApp { // We failed to restore a workflow so load the default if (!restored) { - this.loadGraphData(); + await this.loadGraphData(); } // Save current workflow automatically @@ -1345,7 +1358,6 @@ export class ComfyApp { this.#addDrawNodeHandler(); this.#addDrawGroupsHandler(); - this.#addApiUpdateHandlers(); this.#addDropHandler(); this.#addCopyHandler(); this.#addPasteHandler(); @@ -1365,11 +1377,81 @@ export class ComfyApp { await this.#invokeExtensionsAsync("registerCustomNodes"); } + async registerNodeDef(nodeId, nodeData) { + const self = this; + const node = Object.assign( + function ComfyNode() { + var inputs = nodeData["input"]["required"]; + if (nodeData["input"]["optional"] != undefined) { + inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"]); + } + const config = { minWidth: 1, minHeight: 1 }; + for (const inputName in inputs) { + const inputData = inputs[inputName]; + const type = inputData[0]; + + let widgetCreated = true; + const widgetType = getWidgetType(inputData, inputName); + if(widgetType) { + if(widgetType === "COMBO") { + Object.assign(config, self.widgets.COMBO(this, inputName, inputData, app) || {}); + } else { + Object.assign(config, self.widgets[widgetType](this, inputName, inputData, app) || {}); + } + } else { + // Node connection inputs + this.addInput(inputName, type); + widgetCreated = false; + } + + if(widgetCreated && inputData[1]?.forceInput && config?.widget) { + if (!config.widget.options) config.widget.options = {}; + config.widget.options.forceInput = inputData[1].forceInput; + } + if(widgetCreated && inputData[1]?.defaultInput && config?.widget) { + if (!config.widget.options) config.widget.options = {}; + config.widget.options.defaultInput = inputData[1].defaultInput; + } + } + + for (const o in nodeData["output"]) { + let output = nodeData["output"][o]; + if(output instanceof Array) output = "COMBO"; + const outputName = nodeData["output_name"][o] || output; + const outputShape = nodeData["output_is_list"][o] ? 
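/* With detail.merge set (group nodes forward per-member results), array
   outputs accumulate instead of replacing the previous execution's data:

   // existing: { images: [imgA] }
   // incoming: { images: [imgB], text: ["done"] }
   // merged:   { images: [imgA, imgB], text: ["done"] }
*/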
LiteGraph.GRID_SHAPE : LiteGraph.CIRCLE_SHAPE ; + this.addOutput(outputName, output, { shape: outputShape }); + } + + const s = this.computeSize(); + s[0] = Math.max(config.minWidth, s[0] * 1.5); + s[1] = Math.max(config.minHeight, s[1]); + this.size = s; + this.serialize_widgets = true; + + app.#invokeExtensionsAsync("nodeCreated", this); + }, + { + title: nodeData.display_name || nodeData.name, + comfyClass: nodeData.name, + nodeData + } + ); + node.prototype.comfyClass = nodeData.name; + + this.#addNodeContextMenuHandler(node); + this.#addDrawBackgroundHandler(node, app); + this.#addNodeKeyHandler(node); + + await this.#invokeExtensionsAsync("beforeRegisterNodeDef", node, nodeData); + LiteGraph.registerNodeType(nodeId, node); + node.category = nodeData.category; + } + async registerNodesFromDefs(defs) { await this.#invokeExtensionsAsync("addCustomNodeDefs", defs); // Generate list of known widgets - const widgets = Object.assign( + this.widgets = Object.assign( {}, ComfyWidgets, ...(await this.#invokeExtensionsAsync("getCustomWidgets")).filter(Boolean) @@ -1377,75 +1459,7 @@ export class ComfyApp { // Register a node for each definition for (const nodeId in defs) { - const nodeData = defs[nodeId]; - const node = Object.assign( - function ComfyNode() { - var inputs = nodeData["input"]["required"]; - if (nodeData["input"]["optional"] != undefined){ - inputs = Object.assign({}, nodeData["input"]["required"], nodeData["input"]["optional"]) - } - const config = { minWidth: 1, minHeight: 1 }; - for (const inputName in inputs) { - const inputData = inputs[inputName]; - const type = inputData[0]; - - let widgetCreated = true; - if (Array.isArray(type)) { - // Enums - Object.assign(config, widgets.COMBO(this, inputName, inputData, app) || {}); - } else if (`${type}:${inputName}` in widgets) { - // Support custom widgets by Type:Name - Object.assign(config, widgets[`${type}:${inputName}`](this, inputName, inputData, app) || {}); - } else if (type in widgets) { - // Standard type widgets - Object.assign(config, widgets[type](this, inputName, inputData, app) || {}); - } else { - // Node connection inputs - this.addInput(inputName, type); - widgetCreated = false; - } - - if(widgetCreated && inputData[1]?.forceInput && config?.widget) { - if (!config.widget.options) config.widget.options = {}; - config.widget.options.forceInput = inputData[1].forceInput; - } - if(widgetCreated && inputData[1]?.defaultInput && config?.widget) { - if (!config.widget.options) config.widget.options = {}; - config.widget.options.defaultInput = inputData[1].defaultInput; - } - } - - for (const o in nodeData["output"]) { - let output = nodeData["output"][o]; - if(output instanceof Array) output = "COMBO"; - const outputName = nodeData["output_name"][o] || output; - const outputShape = nodeData["output_is_list"][o] ? 
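/* Pulling registerNodeDef out of registerNodesFromDefs is what lets group
   nodes register synthetic types at runtime:

   await app.registerNodeDef("workflow/my group", nodeDef);
   const node = LiteGraph.createNode("workflow/my group");
   app.graph.add(node);
*/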
LiteGraph.GRID_SHAPE : LiteGraph.CIRCLE_SHAPE ; - this.addOutput(outputName, output, { shape: outputShape }); - } - - const s = this.computeSize(); - s[0] = Math.max(config.minWidth, s[0] * 1.5); - s[1] = Math.max(config.minHeight, s[1]); - this.size = s; - this.serialize_widgets = true; - - app.#invokeExtensionsAsync("nodeCreated", this); - }, - { - title: nodeData.display_name || nodeData.name, - comfyClass: nodeData.name, - nodeData - } - ); - node.prototype.comfyClass = nodeData.name; - - this.#addNodeContextMenuHandler(node); - this.#addDrawBackgroundHandler(node, app); - this.#addNodeKeyHandler(node); - - await this.#invokeExtensionsAsync("beforeRegisterNodeDef", node, nodeData); - LiteGraph.registerNodeType(nodeId, node); - node.category = nodeData.category; + this.registerNodeDef(nodeId, defs[nodeId]); } } @@ -1488,9 +1502,14 @@ export class ComfyApp { showMissingNodesError(missingNodeTypes, hasAddedNodes = true) { this.ui.dialog.show( - `When loading the graph, the following node types were not found:
<ul>${Array.from(new Set(missingNodeTypes)).map( - (t) => `<li>${t}</li>` - ).join("")}</ul>
${hasAddedNodes ? "Nodes that have failed to load will show as red on the graph." : ""}` + $el("div", [ + $el("span", { textContent: "When loading the graph, the following node types were not found: " }), + $el( + "ul", + Array.from(new Set(missingNodeTypes)).map((t) => $el("li", { textContent: t })) + ), + ...(hasAddedNodes ? [$el("span", { textContent: "Nodes that have failed to load will show as red on the graph." })] : []), + ]) ); this.logging.addEntry("Comfy.App", "warn", { MissingNodes: missingNodeTypes, @@ -1501,7 +1520,7 @@ export class ComfyApp { * Populates the graph with the specified workflow data * @param {*} graphData A serialized graph object */ - loadGraphData(graphData) { + async loadGraphData(graphData) { this.clean(); let reset_invalid_values = false; @@ -1519,6 +1538,7 @@ export class ComfyApp { } const missingNodeTypes = []; + await this.#invokeExtensionsAsync("beforeConfigureGraph", graphData, missingNodeTypes); for (let n of graphData.nodes) { // Patch T2IAdapterLoader to ControlNetLoader since they are the same node now if (n.type == "T2IAdapterLoader") n.type = "ControlNetLoader"; @@ -1527,8 +1547,8 @@ export class ComfyApp { // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { - n.type = sanitizeNodeName(n.type); missingNodeTypes.push(n.type); + n.type = sanitizeNodeName(n.type); } } @@ -1627,92 +1647,98 @@ export class ComfyApp { * @returns The workflow and node links */ async graphToPrompt() { - for (const node of this.graph.computeExecutionOrder(false)) { - if (node.isVirtualNode) { - // Don't serialize frontend only nodes but let them make changes - if (node.applyToGraph) { - node.applyToGraph(); + for (const outerNode of this.graph.computeExecutionOrder(false)) { + const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode]; + for (const node of innerNodes) { + if (node.isVirtualNode) { + // Don't serialize frontend only nodes but let them make changes + if (node.applyToGraph) { + node.applyToGraph(); + } } - continue; } } const workflow = this.graph.serialize(); const output = {}; // Process nodes in order of execution - for (const node of this.graph.computeExecutionOrder(false)) { - const n = workflow.nodes.find((n) => n.id === node.id); - - if (node.isVirtualNode) { - continue; - } + for (const outerNode of this.graph.computeExecutionOrder(false)) { + const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode]; + for (const node of innerNodes) { + if (node.isVirtualNode) { + continue; + } - if (node.mode === 2 || node.mode === 4) { - // Don't serialize muted nodes - continue; - } + if (node.mode === 2 || node.mode === 4) { + // Don't serialize muted nodes + continue; + } - const inputs = {}; - const widgets = node.widgets; + const inputs = {}; + const widgets = node.widgets; - // Store all widget values - if (widgets) { - for (const i in widgets) { - const widget = widgets[i]; - if (!widget.options || widget.options.serialize !== false) { - inputs[widget.name] = widget.serializeValue ? await widget.serializeValue(n, i) : widget.value; + // Store all widget values + if (widgets) { + for (const i in widgets) { + const widget = widgets[i]; + if (!widget.options || widget.options.serialize !== false) { + inputs[widget.name] = widget.serializeValue ? 
await widget.serializeValue(node, i) : widget.value; + } } } - } - // Store all node links - for (let i in node.inputs) { - let parent = node.getInputNode(i); - if (parent) { - let link = node.getInputLink(i); - while (parent.mode === 4 || parent.isVirtualNode) { - let found = false; - if (parent.isVirtualNode) { - link = parent.getInputLink(link.origin_slot); - if (link) { - parent = parent.getInputNode(link.target_slot); - if (parent) { - found = true; + // Store all node links + for (let i in node.inputs) { + let parent = node.getInputNode(i); + if (parent) { + let link = node.getInputLink(i); + while (parent.mode === 4 || parent.isVirtualNode) { + let found = false; + if (parent.isVirtualNode) { + link = parent.getInputLink(link.origin_slot); + if (link) { + parent = parent.getInputNode(link.target_slot); + if (parent) { + found = true; + } } - } - } else if (link && parent.mode === 4) { - let all_inputs = [link.origin_slot]; - if (parent.inputs) { - all_inputs = all_inputs.concat(Object.keys(parent.inputs)) - for (let parent_input in all_inputs) { - parent_input = all_inputs[parent_input]; - if (parent.inputs[parent_input]?.type === node.inputs[i].type) { - link = parent.getInputLink(parent_input); - if (link) { - parent = parent.getInputNode(parent_input); + } else if (link && parent.mode === 4) { + let all_inputs = [link.origin_slot]; + if (parent.inputs) { + all_inputs = all_inputs.concat(Object.keys(parent.inputs)) + for (let parent_input in all_inputs) { + parent_input = all_inputs[parent_input]; + if (parent.inputs[parent_input]?.type === node.inputs[i].type) { + link = parent.getInputLink(parent_input); + if (link) { + parent = parent.getInputNode(parent_input); + } + found = true; + break; } - found = true; - break; } } } - } - if (!found) { - break; + if (!found) { + break; + } } - } - if (link) { - inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)]; + if (link) { + if (parent?.updateLink) { + link = parent.updateLink(link); + } + inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)]; + } } } - } - output[String(node.id)] = { - inputs, - class_type: node.comfyClass, - }; + output[String(node.id)] = { + inputs, + class_type: node.comfyClass, + }; + } } // Remove inputs connected to removed nodes @@ -1832,7 +1858,7 @@ export class ComfyApp { const pngInfo = await getPngMetadata(file); if (pngInfo) { if (pngInfo.workflow) { - this.loadGraphData(JSON.parse(pngInfo.workflow)); + await this.loadGraphData(JSON.parse(pngInfo.workflow)); } else if (pngInfo.parameters) { importA1111(this.graph, pngInfo.parameters); } @@ -1848,21 +1874,21 @@ export class ComfyApp { } } else if (file.type === "application/json" || file.name?.endsWith(".json")) { const reader = new FileReader(); - reader.onload = () => { + reader.onload = async () => { const jsonContent = JSON.parse(reader.result); if (jsonContent?.templates) { this.loadTemplateData(jsonContent); } else if(this.isApiJson(jsonContent)) { this.loadApiJson(jsonContent); } else { - this.loadGraphData(jsonContent); + await this.loadGraphData(jsonContent); } }; reader.readAsText(file); } else if (file.name?.endsWith(".latent") || file.name?.endsWith(".safetensors")) { const info = await getLatentMetadata(file); if (info.workflow) { - this.loadGraphData(JSON.parse(info.workflow)); + await this.loadGraphData(JSON.parse(info.workflow)); } } } diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 07da591cb7d..37d26f3c5ef 100644 --- a/web/scripts/domWidget.js +++ 
b/web/scripts/domWidget.js @@ -44,7 +44,7 @@ function getClipPath(node, element, elRect) { } function computeSize(size) { - if (this.widgets?.[0].last_y == null) return; + if (this.widgets?.[0]?.last_y == null) return; let y = this.widgets[0].last_y; let freeSpace = size[1] - y; @@ -195,7 +195,6 @@ export function addDomClippingSetting() { type: "boolean", defaultValue: enableDomClipping, onChange(value) { - console.log("enableDomClipping", enableDomClipping); enableDomClipping = !!value; }, }); diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 8a58d30b3a7..ebaf86fe428 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -462,8 +462,8 @@ class ComfyList { return $el("div", {textContent: item.prompt[0] + ": "}, [ $el("button", { textContent: "Load", - onclick: () => { - app.loadGraphData(item.prompt[3].extra_pnginfo.workflow); + onclick: async () => { + await app.loadGraphData(item.prompt[3].extra_pnginfo.workflow); if (item.outputs) { app.nodeOutputs = item.outputs; } @@ -784,9 +784,9 @@ export class ComfyUI { } }), $el("button", { - id: "comfy-load-default-button", textContent: "Load Default", onclick: () => { + id: "comfy-load-default-button", textContent: "Load Default", onclick: async () => { if (!confirmClear.value || confirm("Load default workflow?")) { - app.loadGraphData() + await app.loadGraphData() } } }), diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index fbc1d0fc324..de5877e5448 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -23,29 +23,73 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } }; } -export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values) { - const widgets = addValueControlWidgets(node, targetWidget, defaultValue, values, { +export function getWidgetType(inputData, inputName) { + const type = inputData[0]; + + if (Array.isArray(type)) { + return "COMBO"; + } else if (`${type}:${inputName}` in ComfyWidgets) { + return `${type}:${inputName}`; + } else if (type in ComfyWidgets) { + return type; + } else { + return null; + } +} + +export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values, widgetName, inputData) { + let name = inputData[1]?.control_after_generate; + if(typeof name !== "string") { + name = widgetName; + } + const widgets = addValueControlWidgets(node, targetWidget, defaultValue, { addFilterList: false, - }); + controlAfterGenerateName: name + }, inputData); return widgets[0]; } -export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", values, options) { +export function addValueControlWidgets(node, targetWidget, defaultValue = "randomize", options, inputData) { + if (!defaultValue) defaultValue = "randomize"; if (!options) options = {}; - + + const getName = (defaultName, optionName) => { + let name = defaultName; + if (options[optionName]) { + name = options[optionName]; + } else if (typeof inputData?.[1]?.[defaultName] === "string") { + name = inputData?.[1]?.[defaultName]; + } else if (inputData?.[1]?.control_prefix) { + name = inputData?.[1]?.control_prefix + " " + name + } + return name; + } + const widgets = []; - const valueControl = node.addWidget("combo", "control_after_generate", defaultValue, function (v) { }, { - values: ["fixed", "increment", "decrement", "randomize"], - serialize: false, // Don't include this in prompt. 
- }); + const valueControl = node.addWidget( + "combo", + getName("control_after_generate", "controlAfterGenerateName"), + defaultValue, + function () {}, + { + values: ["fixed", "increment", "decrement", "randomize"], + serialize: false, // Don't include this in prompt. + } + ); widgets.push(valueControl); const isCombo = targetWidget.type === "combo"; let comboFilter; if (isCombo && options.addFilterList !== false) { - comboFilter = node.addWidget("string", "control_filter_list", "", function (v) {}, { - serialize: false, // Don't include this in prompt. - }); + comboFilter = node.addWidget( + "string", + getName("control_filter_list", "controlFilterListName"), + "", + function () {}, + { + serialize: false, // Don't include this in prompt. + } + ); widgets.push(comboFilter); } @@ -96,7 +140,8 @@ export function addValueControlWidgets(node, targetWidget, defaultValue = "rando targetWidget.value = value; targetWidget.callback(value); } - } else { //number + } else { + //number let min = targetWidget.options.min; let max = targetWidget.options.max; // limit to something that javascript can handle @@ -119,32 +164,54 @@ export function addValueControlWidgets(node, targetWidget, defaultValue = "rando default: break; } - /*check if values are over or under their respective - * ranges and set them to min or max.*/ - if (targetWidget.value < min) - targetWidget.value = min; + /*check if values are over or under their respective + * ranges and set them to min or max.*/ + if (targetWidget.value < min) targetWidget.value = min; if (targetWidget.value > max) targetWidget.value = max; targetWidget.callback(targetWidget.value); } - } - + }; return widgets; }; -function seedWidget(node, inputName, inputData, app) { - const seed = ComfyWidgets.INT(node, inputName, inputData, app); - const seedControl = addValueControlWidget(node, seed.widget, "randomize"); +function seedWidget(node, inputName, inputData, app, widgetName) { + const seed = createIntWidget(node, inputName, inputData, app, true); + const seedControl = addValueControlWidget(node, seed.widget, "randomize", undefined, widgetName, inputData); seed.widget.linkedWidgets = [seedControl]; return seed; } + +function createIntWidget(node, inputName, inputData, app, isSeedInput) { + const control = inputData[1]?.control_after_generate; + if (!isSeedInput && control) { + return seedWidget(node, inputName, inputData, app, typeof control === "string" ? 
control : undefined); + } + + let widgetType = isSlider(inputData[1]["display"], app); + const { val, config } = getNumberDefaults(inputData, 1, 0, true); + Object.assign(config, { precision: 0 }); + return { + widget: node.addWidget( + widgetType, + inputName, + val, + function (v) { + const s = this.options.step / 10; + this.value = Math.round(v / s) * s; + }, + config + ), + }; +} + function addMultilineWidget(node, name, opts, app) { const inputEl = document.createElement("textarea"); inputEl.className = "comfy-multiline-input"; inputEl.value = opts.defaultVal; - inputEl.placeholder = opts.placeholder || ""; + inputEl.placeholder = opts.placeholder || name; const widget = node.addDOMWidget(name, "customtext", inputEl, { getValue() { @@ -156,6 +223,10 @@ function addMultilineWidget(node, name, opts, app) { }); widget.inputEl = inputEl; + inputEl.addEventListener("input", () => { + widget.callback?.(widget.value); + }); + return { minWidth: 400, minHeight: 200, widget }; } @@ -186,21 +257,7 @@ export const ComfyWidgets = { }, config) }; }, INT(node, inputName, inputData, app) { - let widgetType = isSlider(inputData[1]["display"], app); - const { val, config } = getNumberDefaults(inputData, 1, 0, true); - Object.assign(config, { precision: 0 }); - return { - widget: node.addWidget( - widgetType, - inputName, - val, - function (v) { - const s = this.options.step / 10; - this.value = Math.round(v / s) * s; - }, - config - ), - }; + return createIntWidget(node, inputName, inputData, app); }, BOOLEAN(node, inputName, inputData) { let defaultVal = false; @@ -245,10 +302,14 @@ export const ComfyWidgets = { if (inputData[1] && inputData[1].default) { defaultValue = inputData[1].default; } - return { widget: node.addWidget("combo", inputName, defaultValue, () => {}, { values: type }) }; + const res = { widget: node.addWidget("combo", inputName, defaultValue, () => {}, { values: type }) }; + if (inputData[1]?.control_after_generate) { + res.widget.linkedWidgets = addValueControlWidgets(node, res.widget, undefined, undefined, inputData); + } + return res; }, IMAGEUPLOAD(node, inputName, inputData, app) { - const imageWidget = node.widgets.find((w) => w.name === "image"); + const imageWidget = node.widgets.find((w) => w.name === (inputData[1]?.widget ?? "image")); let uploadWidget; function showImage(name) { @@ -362,9 +423,10 @@ export const ComfyWidgets = { document.body.append(fileInput); // Create the button widget for selecting the files - uploadWidget = node.addWidget("button", "choose file to upload", "image", () => { + uploadWidget = node.addWidget("button", inputName, "image", () => { fileInput.click(); }); + uploadWidget.label = "choose file to upload"; uploadWidget.serialize = false; // Add handler to check if an image is being dragged over our node From 6b769bca01bf7de989ab4aaafd8db41a92a87094 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 30 Nov 2023 15:22:32 -0500 Subject: [PATCH 259/420] Do a garbage collect after the interval even if nothing is running. 
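The core of the change: the queue's get() can now wake up on a timeout, so the
worker can run maintenance while idle instead of blocking forever. A minimal
self-contained sketch of the pattern (hypothetical names, not the actual
ComfyUI classes):

    import gc
    import heapq
    import threading
    import time

    class TimeoutQueue:
        # Toy priority queue whose get() can time out, mirroring execution.py.
        def __init__(self):
            self.not_empty = threading.Condition(threading.RLock())
            self.queue = []

        def put(self, item):
            with self.not_empty:
                heapq.heappush(self.queue, item)
                self.not_empty.notify()

        def get(self, timeout=None):
            with self.not_empty:
                while len(self.queue) == 0:
                    self.not_empty.wait(timeout=timeout)
                    if timeout is not None and len(self.queue) == 0:
                        return None  # timed out with nothing queued
                return heapq.heappop(self.queue)

    def worker(q, execute, gc_collect_interval=10.0):
        last_gc_collect = time.perf_counter()
        need_gc = False
        while True:
            timeout = None
            if need_gc:
                timeout = max(gc_collect_interval - (time.perf_counter() - last_gc_collect), 0.0)
            item = q.get(timeout=timeout)
            if item is not None:
                execute(item)  # run the job; a gc pass becomes worthwhile afterwards
                need_gc = True
            if need_gc and (time.perf_counter() - last_gc_collect) > gc_collect_interval:
                gc.collect()
                last_gc_collect = time.perf_counter()
                need_gc = False

Waking on a condition-variable timeout avoids a dedicated timer thread, and the
need_gc flag means the collect only runs after something actually executed.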
--- execution.py | 6 ++++-- main.py | 45 +++++++++++++++++++++++++++++---------------- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/execution.py b/execution.py index bca48a785c2..7db1f095b10 100644 --- a/execution.py +++ b/execution.py @@ -700,10 +700,12 @@ def put(self, item): self.server.queue_updated() self.not_empty.notify() - def get(self): + def get(self, timeout=None): with self.not_empty: while len(self.queue) == 0: - self.not_empty.wait() + self.not_empty.wait(timeout=timeout) + if timeout is not None and len(self.queue) == 0: + return None item = heapq.heappop(self.queue) i = self.task_counter self.currently_running[i] = copy.deepcopy(item) diff --git a/main.py b/main.py index 3997fbefcb3..1f9c5f443c3 100644 --- a/main.py +++ b/main.py @@ -89,23 +89,36 @@ def cuda_malloc_warning(): def prompt_worker(q, server): e = execution.PromptExecutor(server) last_gc_collect = 0 + need_gc = False + gc_collect_interval = 10.0 + while True: - item, item_id = q.get() - execution_start_time = time.perf_counter() - prompt_id = item[1] - e.execute(item[2], prompt_id, item[3], item[4]) - q.task_done(item_id, e.outputs_ui) - if server.client_id is not None: - server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) - - current_time = time.perf_counter() - execution_time = current_time - execution_start_time - print("Prompt executed in {:.2f} seconds".format(execution_time)) - if (current_time - last_gc_collect) > 10.0: - gc.collect() - comfy.model_management.soft_empty_cache() - last_gc_collect = current_time - print("gc collect") + timeout = None + if need_gc: + timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0) + + queue_item = q.get(timeout=timeout) + if queue_item is not None: + item, item_id = queue_item + execution_start_time = time.perf_counter() + prompt_id = item[1] + e.execute(item[2], prompt_id, item[3], item[4]) + need_gc = True + q.task_done(item_id, e.outputs_ui) + if server.client_id is not None: + server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) + + current_time = time.perf_counter() + execution_time = current_time - execution_start_time + print("Prompt executed in {:.2f} seconds".format(execution_time)) + + if need_gc: + current_time = time.perf_counter() + if (current_time - last_gc_collect) > gc_collect_interval: + gc.collect() + comfy.model_management.soft_empty_cache() + last_gc_collect = current_time + need_gc = False async def run(server, address='', port=8188, verbose=True, call_on_start=None): await asyncio.gather(server.start(address, port, verbose, call_on_start), server.publish_loop()) From c97be4db91d4a249c19afdf88fa1cf3268544e45 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 30 Nov 2023 19:27:03 -0500 Subject: [PATCH 260/420] Support SD2.1 turbo checkpoint. --- comfy/supported_models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 7e2ac677d51..455323b9629 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -71,6 +71,10 @@ def model_type(self, state_dict, prefix=""): return model_base.ModelType.EPS def process_clip_state_dict(self, state_dict): + replace_prefix = {} + replace_prefix["conditioner.embedders.0.model."] = "cond_stage_model.model." 
#SD2 in sgm format + state_dict = utils.state_dict_prefix_replace(state_dict, replace_prefix) + state_dict = utils.transformers_convert(state_dict, "cond_stage_model.model.", "cond_stage_model.clip_h.transformer.text_model.", 24) return state_dict From 5d5c320054758413be00e98b26a28b39ee8f2acd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 1 Dec 2023 02:03:34 -0500 Subject: [PATCH 261/420] Fix right click not working for some users. --- web/extensions/core/groupNode.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 450b4f5f35c..397c4c71393 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -1010,7 +1010,7 @@ function addConvertToGroupOptions() { const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions; LGraphCanvas.prototype.getCanvasMenuOptions = function () { const options = getCanvasMenuOptions.apply(this, arguments); - const index = options.findIndex((o) => o?.content === "Add Group") + 1 || opts.length; + const index = options.findIndex((o) => o?.content === "Add Group") + 1 || options.length; addOption(options, index); return options; }; @@ -1020,7 +1020,7 @@ function addConvertToGroupOptions() { LGraphCanvas.prototype.getNodeMenuOptions = function (node) { const options = getNodeMenuOptions.apply(this, arguments); if (!GroupNodeHandler.isGroupNode(node)) { - const index = options.findIndex((o) => o?.content === "Outputs") + 1 || opts.length - 1; + const index = options.findIndex((o) => o?.content === "Outputs") + 1 || options.length - 1; addOption(options, index); } return options; From ec7a00aa9644049c306dd0a2c02cb4f91f127286 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 1 Dec 2023 04:13:04 -0500 Subject: [PATCH 262/420] Fix extension widgets not working. 
--- web/extensions/core/groupNode.js | 3 +-- web/scripts/app.js | 18 ++++++++++++++++-- web/scripts/widgets.js | 14 -------------- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 397c4c71393..4b4bf74fa08 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -1,6 +1,5 @@ import { app } from "../../scripts/app.js"; import { api } from "../../scripts/api.js"; -import { getWidgetType } from "../../scripts/widgets.js"; import { mergeIfValid } from "./widgetInputs.js"; const GROUP = Symbol(); @@ -332,7 +331,7 @@ export class GroupNodeConfig { const converted = new Map(); const widgetMap = (this.oldToNewWidgetMap[node.index] = {}); for (const inputName of inputNames) { - let widgetType = getWidgetType(inputs[inputName], inputName); + let widgetType = app.getWidgetType(inputs[inputName], inputName); if (widgetType) { const convertedIndex = node.inputs?.findIndex( (inp) => inp.name === inputName && inp.widget?.name === inputName diff --git a/web/scripts/app.js b/web/scripts/app.js index e9cfb277dd4..a72e30027e3 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1,5 +1,5 @@ import { ComfyLogging } from "./logging.js"; -import { ComfyWidgets, getWidgetType } from "./widgets.js"; +import { ComfyWidgets } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; @@ -1377,6 +1377,20 @@ export class ComfyApp { await this.#invokeExtensionsAsync("registerCustomNodes"); } + getWidgetType(inputData, inputName) { + const type = inputData[0]; + + if (Array.isArray(type)) { + return "COMBO"; + } else if (`${type}:${inputName}` in this.widgets) { + return `${type}:${inputName}`; + } else if (type in this.widgets) { + return type; + } else { + return null; + } + } + async registerNodeDef(nodeId, nodeData) { const self = this; const node = Object.assign( @@ -1391,7 +1405,7 @@ export class ComfyApp { const type = inputData[0]; let widgetCreated = true; - const widgetType = getWidgetType(inputData, inputName); + const widgetType = self.getWidgetType(inputData, inputName); if(widgetType) { if(widgetType === "COMBO") { Object.assign(config, self.widgets.COMBO(this, inputName, inputData, app) || {}); diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index de5877e5448..d599b85ba94 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -23,20 +23,6 @@ function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } }; } -export function getWidgetType(inputData, inputName) { - const type = inputData[0]; - - if (Array.isArray(type)) { - return "COMBO"; - } else if (`${type}:${inputName}` in ComfyWidgets) { - return `${type}:${inputName}`; - } else if (type in ComfyWidgets) { - return type; - } else { - return null; - } -} - export function addValueControlWidget(node, targetWidget, defaultValue = "randomize", values, widgetName, inputData) { let name = inputData[1]?.control_after_generate; if(typeof name !== "string") { From 8491280504d69f38d1bc72568f8f745c5dc41d74 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Fri, 1 Dec 2023 22:24:20 +0000 Subject: [PATCH 263/420] Add Extension tests (#2125) * Add test for extension hooks Add afterConfigureGraph callback * fix comment --- tests-ui/tests/extensions.test.js | 196 
++++++++++++++++++++++++++++++ tests-ui/utils/index.js | 7 +- web/scripts/app.js | 1 + 3 files changed, 201 insertions(+), 3 deletions(-) create mode 100644 tests-ui/tests/extensions.test.js diff --git a/tests-ui/tests/extensions.test.js b/tests-ui/tests/extensions.test.js new file mode 100644 index 00000000000..b82e55c328b --- /dev/null +++ b/tests-ui/tests/extensions.test.js @@ -0,0 +1,196 @@ +// @ts-check +/// +const { start } = require("../utils"); +const lg = require("../utils/litegraph"); + +describe("extensions", () => { + beforeEach(() => { + lg.setup(global); + }); + + afterEach(() => { + lg.teardown(global); + }); + + it("calls each extension hook", async () => { + const mockExtension = { + name: "TestExtension", + init: jest.fn(), + setup: jest.fn(), + addCustomNodeDefs: jest.fn(), + getCustomWidgets: jest.fn(), + beforeRegisterNodeDef: jest.fn(), + registerCustomNodes: jest.fn(), + loadedGraphNode: jest.fn(), + nodeCreated: jest.fn(), + beforeConfigureGraph: jest.fn(), + afterConfigureGraph: jest.fn(), + }; + + const { app, ez, graph } = await start({ + async preSetup(app) { + app.registerExtension(mockExtension); + }, + }); + + // Basic initialisation hooks should be called once, with app + expect(mockExtension.init).toHaveBeenCalledTimes(1); + expect(mockExtension.init).toHaveBeenCalledWith(app); + + // Adding custom node defs should be passed the full list of nodes + expect(mockExtension.addCustomNodeDefs).toHaveBeenCalledTimes(1); + expect(mockExtension.addCustomNodeDefs.mock.calls[0][1]).toStrictEqual(app); + const defs = mockExtension.addCustomNodeDefs.mock.calls[0][0]; + expect(defs).toHaveProperty("KSampler"); + expect(defs).toHaveProperty("LoadImage"); + + // Get custom widgets is called once and should return new widget types + expect(mockExtension.getCustomWidgets).toHaveBeenCalledTimes(1); + expect(mockExtension.getCustomWidgets).toHaveBeenCalledWith(app); + + // Before register node def will be called once per node type + const nodeNames = Object.keys(defs); + const nodeCount = nodeNames.length; + expect(mockExtension.beforeRegisterNodeDef).toHaveBeenCalledTimes(nodeCount); + for (let i = 0; i < nodeCount; i++) { + // It should be send the JS class and the original JSON definition + const nodeClass = mockExtension.beforeRegisterNodeDef.mock.calls[i][0]; + const nodeDef = mockExtension.beforeRegisterNodeDef.mock.calls[i][1]; + + expect(nodeClass.name).toBe("ComfyNode"); + expect(nodeClass.comfyClass).toBe(nodeNames[i]); + expect(nodeDef.name).toBe(nodeNames[i]); + expect(nodeDef).toHaveProperty("input"); + expect(nodeDef).toHaveProperty("output"); + } + + // Register custom nodes is called once after registerNode defs to allow adding other frontend nodes + expect(mockExtension.registerCustomNodes).toHaveBeenCalledTimes(1); + + // Before configure graph will be called here as the default graph is being loaded + expect(mockExtension.beforeConfigureGraph).toHaveBeenCalledTimes(1); + // it gets sent the graph data that is going to be loaded + const graphData = mockExtension.beforeConfigureGraph.mock.calls[0][0]; + + // A node created is fired for each node constructor that is called + expect(mockExtension.nodeCreated).toHaveBeenCalledTimes(graphData.nodes.length); + for (let i = 0; i < graphData.nodes.length; i++) { + expect(mockExtension.nodeCreated.mock.calls[i][0].type).toBe(graphData.nodes[i].type); + } + + // Each node then calls loadedGraphNode to allow them to be updated + expect(mockExtension.loadedGraphNode).toHaveBeenCalledTimes(graphData.nodes.length); + for 
(let i = 0; i < graphData.nodes.length; i++) { + expect(mockExtension.loadedGraphNode.mock.calls[i][0].type).toBe(graphData.nodes[i].type); + } + + // After configure is then called once all the setup is done + expect(mockExtension.afterConfigureGraph).toHaveBeenCalledTimes(1); + + expect(mockExtension.setup).toHaveBeenCalledTimes(1); + expect(mockExtension.setup).toHaveBeenCalledWith(app); + + // Ensure hooks are called in the correct order + const callOrder = [ + "init", + "addCustomNodeDefs", + "getCustomWidgets", + "beforeRegisterNodeDef", + "registerCustomNodes", + "beforeConfigureGraph", + "nodeCreated", + "loadedGraphNode", + "afterConfigureGraph", + "setup", + ]; + for (let i = 1; i < callOrder.length; i++) { + const fn1 = mockExtension[callOrder[i - 1]]; + const fn2 = mockExtension[callOrder[i]]; + expect(fn1.mock.invocationCallOrder[0]).toBeLessThan(fn2.mock.invocationCallOrder[0]); + } + + graph.clear(); + + // Ensure adding a new node calls the correct callback + ez.LoadImage(); + expect(mockExtension.loadedGraphNode).toHaveBeenCalledTimes(graphData.nodes.length); + expect(mockExtension.nodeCreated).toHaveBeenCalledTimes(graphData.nodes.length + 1); + expect(mockExtension.nodeCreated.mock.lastCall[0].type).toBe("LoadImage"); + + // Reload the graph to ensure correct hooks are fired + await graph.reload(); + + // These hooks should not be fired again + expect(mockExtension.init).toHaveBeenCalledTimes(1); + expect(mockExtension.addCustomNodeDefs).toHaveBeenCalledTimes(1); + expect(mockExtension.getCustomWidgets).toHaveBeenCalledTimes(1); + expect(mockExtension.registerCustomNodes).toHaveBeenCalledTimes(1); + expect(mockExtension.beforeRegisterNodeDef).toHaveBeenCalledTimes(nodeCount); + expect(mockExtension.setup).toHaveBeenCalledTimes(1); + + // These should be called again + expect(mockExtension.beforeConfigureGraph).toHaveBeenCalledTimes(2); + expect(mockExtension.nodeCreated).toHaveBeenCalledTimes(graphData.nodes.length + 2); + expect(mockExtension.loadedGraphNode).toHaveBeenCalledTimes(graphData.nodes.length + 1); + expect(mockExtension.afterConfigureGraph).toHaveBeenCalledTimes(2); + }); + + it("allows custom nodeDefs and widgets to be registered", async () => { + const widgetMock = jest.fn((node, inputName, inputData, app) => { + expect(node.constructor.comfyClass).toBe("TestNode"); + expect(inputName).toBe("test_input"); + expect(inputData[0]).toBe("CUSTOMWIDGET"); + expect(inputData[1]?.hello).toBe("world"); + expect(app).toStrictEqual(app); + + return { + widget: node.addWidget("button", inputName, "hello", () => {}), + }; + }); + + // Register our extension that adds a custom node + widget type + const mockExtension = { + name: "TestExtension", + addCustomNodeDefs: (nodeDefs) => { + nodeDefs["TestNode"] = { + output: [], + output_name: [], + output_is_list: [], + name: "TestNode", + display_name: "TestNode", + category: "Test", + input: { + required: { + test_input: ["CUSTOMWIDGET", { hello: "world" }], + }, + }, + }; + }, + getCustomWidgets: jest.fn(() => { + return { + CUSTOMWIDGET: widgetMock, + }; + }), + }; + + const { graph, ez } = await start({ + async preSetup(app) { + app.registerExtension(mockExtension); + }, + }); + + expect(mockExtension.getCustomWidgets).toBeCalledTimes(1); + + graph.clear(); + expect(widgetMock).toBeCalledTimes(0); + const node = ez.TestNode(); + expect(widgetMock).toBeCalledTimes(1); + + // Ensure our custom widget is created + expect(node.inputs.length).toBe(0); + expect(node.widgets.length).toBe(1); + const w = node.widgets[0].widget; 
+ expect(w.name).toBe("test_input"); + expect(w.type).toBe("button"); + }); +}); diff --git a/tests-ui/utils/index.js b/tests-ui/utils/index.js index eeccdb3d921..3a018f566e4 100644 --- a/tests-ui/utils/index.js +++ b/tests-ui/utils/index.js @@ -4,11 +4,11 @@ const lg = require("./litegraph"); /** * - * @param { Parameters[0] & { resetEnv?: boolean } } config + * @param { Parameters[0] & { resetEnv?: boolean, preSetup?(app): Promise } } config * @returns */ -export async function start(config = undefined) { - if(config?.resetEnv) { +export async function start(config = {}) { + if(config.resetEnv) { jest.resetModules(); jest.resetAllMocks(); lg.setup(global); @@ -16,6 +16,7 @@ export async function start(config = undefined) { mockApi(config); const { app } = require("../../web/scripts/app"); + config.preSetup?.(app); await app.setup(); return { ...Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]), app }; } diff --git a/web/scripts/app.js b/web/scripts/app.js index a72e30027e3..861db16bddf 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1654,6 +1654,7 @@ export class ComfyApp { if (missingNodeTypes.length) { this.showMissingNodesError(missingNodeTypes); } + await this.#invokeExtensionsAsync("afterConfigureGraph", missingNodeTypes); } /** From 2995a2472541cb10ce9f4934baef9d5993ed3306 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 1 Dec 2023 18:29:33 -0500 Subject: [PATCH 264/420] Update readme. --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index af1f2281158..450a012bb8e 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git |---------------------------|--------------------------------------------------------------------------------------------------------------------| | Ctrl + Enter | Queue up current graph for generation | | Ctrl + Shift + Enter | Queue up current graph as first for generation | +| Ctrl + Z/Ctrl + Y | Undo/Redo | | Ctrl + S | Save workflow | | Ctrl + O | Load workflow | | Ctrl + A | Select all nodes | @@ -100,6 +101,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins ```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6``` This is the command to install the nightly with ROCm 5.7 that might have some performance improvements: + ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.7``` ### NVIDIA @@ -192,7 +194,7 @@ To use a textual inversion concepts/embeddings in a text prompt put them in the Make sure you use the regular loaders/Load Checkpoint node to load checkpoints. It will auto pick the right settings depending on your GPU. -You can set this command line setting to disable the upcasting to fp32 in some cross attention operations which will increase your speed. Note that this will very likely give you black images on SD2.x models. If you use xformers this option does not do anything. +You can set this command line setting to disable the upcasting to fp32 in some cross attention operations which will increase your speed. Note that this will very likely give you black images on SD2.x models. If you use xformers or pytorch attention this option does not do anything. 
```--dont-upcast-attention``` From 28220fa8392b6b2f0eabb0d0c8311ff3b07af69a Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 2 Dec 2023 12:02:03 +0000 Subject: [PATCH 265/420] Fix node growing with DOM widgets when adding image even if enough space --- web/scripts/app.js | 2 +- web/scripts/domWidget.js | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 861db16bddf..8598d44781d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -411,7 +411,7 @@ export class ComfyApp { node.prototype.setSizeForImage = function (force) { if(!force && this.animatedImages) return; - if (this.inputHeight) { + if (this.inputHeight || this.freeWidgetSpace > 210) { this.setSize(this.size); return; } diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index 37d26f3c5ef..e919428a0b8 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -120,6 +120,8 @@ function computeSize(size) { freeSpace -= 220; } + this.freeWidgetSpace = freeSpace; + if (freeSpace < 0) { // Not enough space for all widgets so we need to grow size[1] -= freeSpace; From b2517b4ceb2e0fdea438586d1c8883eccab1f9bf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 2 Dec 2023 13:56:11 -0500 Subject: [PATCH 266/420] Load api workflow if regular workflow isn't in loaded image. --- web/scripts/app.js | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index d3049058aed..dc0f2c3c9bb 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1878,6 +1878,8 @@ export class ComfyApp { if (pngInfo) { if (pngInfo.workflow) { await this.loadGraphData(JSON.parse(pngInfo.workflow)); + } else if (pngInfo.prompt) { + this.loadApiJson(JSON.parse(pngInfo.prompt)); } else if (pngInfo.parameters) { importA1111(this.graph, pngInfo.parameters); } @@ -1889,6 +1891,8 @@ export class ComfyApp { this.loadGraphData(JSON.parse(pngInfo.workflow)); } else if (pngInfo.Workflow) { this.loadGraphData(JSON.parse(pngInfo.Workflow)); // Support loading workflows from that webp custom node. + } else if (pngInfo.prompt) { + this.loadApiJson(JSON.parse(pngInfo.prompt)); } } } else if (file.type === "application/json" || file.name?.endsWith(".json")) { @@ -1908,6 +1912,8 @@ export class ComfyApp { const info = await getLatentMetadata(file); if (info.workflow) { await this.loadGraphData(JSON.parse(info.workflow)); + } else if (info.prompt) { + this.loadApiJson(JSON.parse(info.prompt)); } } } From 61a123a1e083c584a333874b89828125171f7635 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 3 Dec 2023 03:31:47 -0500 Subject: [PATCH 267/420] A different way of handling multiple images passed to SVD. 
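In short, frames are now assigned to the source images by a nearest-index
mapping instead of cyclic repetition (details below). A quick standalone check
of the formula the new resize_to_batch_size helper uses when growing a batch
(variable names here are illustrative):

    import math

    in_batch, out_batch = 3, 6  # 3 source images stretched to a 6 frame video
    scale = in_batch / out_batch
    indices = [min(math.floor((i + 0.5) * scale), in_batch - 1) for i in range(out_batch)]
    print(indices)  # [0, 0, 1, 1, 2, 2] -- each image covers a contiguous run of frames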
Previously when a list of 3 images [0, 1, 2] was used for a 6 frame video they were concated like this: [0, 1, 2, 0, 1, 2] now they are concated like this: [0, 0, 1, 1, 2, 2] --- comfy/model_base.py | 2 +- comfy/utils.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 786c9cf47ba..253ea66673b 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -303,7 +303,7 @@ def extra_conds(self, **kwargs): if latent_image.shape[1:] != noise.shape[1:]: latent_image = utils.common_upscale(latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center") - latent_image = utils.repeat_to_batch_size(latent_image, noise.shape[0]) + latent_image = utils.resize_to_batch_size(latent_image, noise.shape[0]) out['c_concat'] = comfy.conds.CONDNoiseShape(latent_image) diff --git a/comfy/utils.py b/comfy/utils.py index 294bbb425ff..50557704736 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -239,6 +239,26 @@ def repeat_to_batch_size(tensor, batch_size): return tensor.repeat([math.ceil(batch_size / tensor.shape[0])] + [1] * (len(tensor.shape) - 1))[:batch_size] return tensor +def resize_to_batch_size(tensor, batch_size): + in_batch_size = tensor.shape[0] + if in_batch_size == batch_size: + return tensor + + if batch_size <= 1: + return tensor[:batch_size] + + output = torch.empty([batch_size] + list(tensor.shape)[1:], dtype=tensor.dtype, device=tensor.device) + if batch_size < in_batch_size: + scale = (in_batch_size - 1) / (batch_size - 1) + for i in range(batch_size): + output[i] = tensor[min(round(i * scale), in_batch_size - 1)] + else: + scale = in_batch_size / batch_size + for i in range(batch_size): + output[i] = tensor[min(math.floor((i + 0.5) * scale), in_batch_size - 1)] + + return output + def convert_sd_to(state_dict, dtype): keys = list(state_dict.keys()) for k in keys: From 496de0891d9b7ae37c6dbbc68c5e22802302dc8f Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 3 Dec 2023 16:49:48 +0000 Subject: [PATCH 268/420] Allow removing erroring embedded groups Unregister group nodes on workflow change --- web/extensions/core/groupNode.js | 29 ++++++++++++++++++++++++++++- web/scripts/app.js | 28 +++++++++++++++++++++++++--- web/style.css | 5 +++++ 3 files changed, 58 insertions(+), 4 deletions(-) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 4b4bf74fa08..6766f356d42 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -475,6 +475,16 @@ export class GroupNodeConfig { } static async registerFromWorkflow(groupNodes, missingNodeTypes) { + const clean = app.clean; + app.clean = function () { + for (const g in groupNodes) { + try { + LiteGraph.unregisterNodeType("workflow/" + g); + } catch (error) {} + } + app.clean = clean; + }; + for (const g in groupNodes) { const groupData = groupNodes[g]; @@ -482,7 +492,24 @@ export class GroupNodeConfig { for (const n of groupData.nodes) { // Find missing node types if (!(n.type in LiteGraph.registered_node_types)) { - missingNodeTypes.push(n.type); + missingNodeTypes.push({ + type: n.type, + hint: ` (In group node 'workflow/${g}')`, + }); + + missingNodeTypes.push({ + type: "workflow/" + g, + action: { + text: "Remove from workflow", + callback: (e) => { + delete groupNodes[g]; + e.target.textContent = "Removed"; + e.target.style.pointerEvents = "none"; + e.target.style.opacity = 0.7; + }, + }, + }); + hasMissing = true; } } diff --git 
a/web/scripts/app.js b/web/scripts/app.js index dc0f2c3c9bb..b3a22f3004d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1519,14 +1519,36 @@ export class ComfyApp { } showMissingNodesError(missingNodeTypes, hasAddedNodes = true) { + let seenTypes = new Set(); + this.ui.dialog.show( - $el("div", [ + $el("div.comfy-missing-nodes", [ $el("span", { textContent: "When loading the graph, the following node types were not found: " }), $el( "ul", - Array.from(new Set(missingNodeTypes)).map((t) => $el("li", { textContent: t })) + Array.from(new Set(missingNodeTypes)).map((t) => { + let children = []; + if (typeof t === "object") { + if(seenTypes.has(t.type)) return null; + seenTypes.add(t.type); + children.push($el("span", { textContent: t.type })); + if (t.hint) { + children.push($el("span", { textContent: t.hint })); + } + if (t.action) { + children.push($el("button", { onclick: t.action.callback, textContent: t.action.text })); + } + } else { + if(seenTypes.has(t)) return null; + seenTypes.add(t); + children.push($el("span", { textContent: t })); + } + return $el("li", children); + }).filter(Boolean) ), - ...(hasAddedNodes ? [$el("span", { textContent: "Nodes that have failed to load will show as red on the graph." })] : []), + ...(hasAddedNodes + ? [$el("span", { textContent: "Nodes that have failed to load will show as red on the graph." })] + : []), ]) ); this.logging.addEntry("Comfy.App", "warn", { diff --git a/web/style.css b/web/style.css index 378fe0a48b9..630eea12e6d 100644 --- a/web/style.css +++ b/web/style.css @@ -424,6 +424,11 @@ dialog::backdrop { height: var(--comfy-img-preview-height); } +.comfy-missing-nodes li button { + font-size: 12px; + margin-left: 5px; +} + /* Search box */ .litegraph.litesearchbox { From 44d8abadf08e0dcef3fb97d66d46e7cabc160e60 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 3 Dec 2023 17:04:16 +0000 Subject: [PATCH 269/420] allow muting group node --- web/scripts/app.js | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index b3a22f3004d..5faf41fb36b 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1704,7 +1704,8 @@ export class ComfyApp { const output = {}; // Process nodes in order of execution for (const outerNode of this.graph.computeExecutionOrder(false)) { - const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode]; + const skipNode = outerNode.mode === 2 || outerNode.mode === 4; + const innerNodes = (!skipNode && outerNode.getInnerNodes) ? 
outerNode.getInnerNodes() : [outerNode]; for (const node of innerNodes) { if (node.isVirtualNode) { continue; From 77ab2c3f699ca35d19060d8add6bccd66a1b532f Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 3 Dec 2023 17:17:23 +0000 Subject: [PATCH 270/420] fix template sorting --- web/extensions/core/nodeTemplates.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index 2d4821742d1..bc9a108644a 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -164,7 +164,7 @@ class ManageTemplates extends ComfyDialog { var prev_i = el.dataset.id; if ( el == this.draggedEl && prev_i != i ) { - [this.templates[i], this.templates[prev_i]] = [this.templates[prev_i], this.templates[i]]; + this.templates.splice(i, 0, this.templates.splice(prev_i, 1)[0]); } el.dataset.id = i; }); From af365e4dd152b23cd6cf993ddf9ed7c7e7088b39 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 4 Dec 2023 03:12:18 -0500 Subject: [PATCH 271/420] All the unet ops with weights are now handled by comfy.ops --- comfy/controlnet.py | 10 ++++++++++ comfy/ldm/modules/attention.py | 18 ++++-------------- .../modules/diffusionmodules/openaimodel.py | 13 ++++++------- comfy/ops.py | 8 ++++++++ 4 files changed, 28 insertions(+), 21 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 433381df6ec..6dd99afdc77 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -5,6 +5,7 @@ import comfy.model_management import comfy.model_detection import comfy.model_patcher +import comfy.ops import comfy.cldm.cldm import comfy.t2i_adapter.adapter @@ -248,6 +249,15 @@ def conv_nd(self, dims, *args, **kwargs): else: raise ValueError(f"unsupported dimensions: {dims}") + class Conv3d(comfy.ops.Conv3d): + pass + + class GroupNorm(comfy.ops.GroupNorm): + pass + + class LayerNorm(comfy.ops.LayerNorm): + pass + class ControlLora(ControlNet): def __init__(self, control_weights, global_average_pooling=False, device=None): diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index f684523823d..c2b85a6914b 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -83,16 +83,6 @@ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, def forward(self, x): return self.net(x) - -def zero_module(module): - """ - Zero out the parameters of a module and return it. 
- """ - for p in module.parameters(): - p.detach().zero_() - return module - - def Normalize(in_channels, dtype=None, device=None): return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) @@ -414,10 +404,10 @@ def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff= self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2, heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations) # is self-attn if context is none - self.norm2 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) + self.norm2 = operations.LayerNorm(inner_dim, dtype=dtype, device=device) - self.norm1 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) - self.norm3 = nn.LayerNorm(inner_dim, dtype=dtype, device=device) + self.norm1 = operations.LayerNorm(inner_dim, dtype=dtype, device=device) + self.norm3 = operations.LayerNorm(inner_dim, dtype=dtype, device=device) self.checkpoint = checkpoint self.n_heads = n_heads self.d_head = d_head @@ -559,7 +549,7 @@ def __init__(self, in_channels, n_heads, d_head, context_dim = [context_dim] * depth self.in_channels = in_channels inner_dim = n_heads * d_head - self.norm = Normalize(in_channels, dtype=dtype, device=device) + self.norm = operations.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device) if not use_linear: self.proj_in = operations.Conv2d(in_channels, inner_dim, diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 48264892c26..855c3d1f4cd 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -177,7 +177,7 @@ def __init__( padding = kernel_size // 2 self.in_layers = nn.Sequential( - nn.GroupNorm(32, channels, dtype=dtype, device=device), + operations.GroupNorm(32, channels, dtype=dtype, device=device), nn.SiLU(), operations.conv_nd(dims, channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device), ) @@ -206,12 +206,11 @@ def __init__( ), ) self.out_layers = nn.Sequential( - nn.GroupNorm(32, self.out_channels, dtype=dtype, device=device), + operations.GroupNorm(32, self.out_channels, dtype=dtype, device=device), nn.SiLU(), nn.Dropout(p=dropout), - zero_module( - operations.conv_nd(dims, self.out_channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device) - ), + operations.conv_nd(dims, self.out_channels, self.out_channels, kernel_size, padding=padding, dtype=dtype, device=device) + , ) if self.out_channels == channels: @@ -810,13 +809,13 @@ def get_resblock( self._feature_size += ch self.out = nn.Sequential( - nn.GroupNorm(32, ch, dtype=self.dtype, device=device), + operations.GroupNorm(32, ch, dtype=self.dtype, device=device), nn.SiLU(), zero_module(operations.conv_nd(dims, model_channels, out_channels, 3, padding=1, dtype=self.dtype, device=device)), ) if self.predict_codebook_ids: self.id_predictor = nn.Sequential( - nn.GroupNorm(32, ch, dtype=self.dtype, device=device), + operations.GroupNorm(32, ch, dtype=self.dtype, device=device), operations.conv_nd(dims, model_channels, n_embed, 1, dtype=self.dtype, device=device), #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits ) diff --git a/comfy/ops.py b/comfy/ops.py index 0bfb698aa7f..deb849d63c9 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -13,6 +13,14 @@ class Conv3d(torch.nn.Conv3d): def 
reset_parameters(self): return None +class GroupNorm(torch.nn.GroupNorm): + def reset_parameters(self): + return None + +class LayerNorm(torch.nn.LayerNorm): + def reset_parameters(self): + return None + def conv_nd(dims, *args, **kwargs): if dims == 2: return Conv2d(*args, **kwargs) From 31b0f6f3d8034371e95024d6bba5c193db79bd9d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 4 Dec 2023 11:10:00 -0500 Subject: [PATCH 272/420] UNET weights can now be stored in fp8. --fp8_e4m3fn-unet and --fp8_e5m2-unet are the two different formats supported by pytorch. --- comfy/cldm/cldm.py | 4 ++-- comfy/cli_args.py | 5 ++++- comfy/controlnet.py | 16 ++++++++++++---- .../ldm/modules/diffusionmodules/openaimodel.py | 4 ++-- comfy/model_base.py | 13 ++++++++++++- comfy/model_management.py | 15 +++++++++++++++ 6 files changed, 47 insertions(+), 10 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 76a525b378a..bbe5891e691 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -283,7 +283,7 @@ def make_zero_conv(self, channels, operations=None): return TimestepEmbedSequential(zero_module(operations.conv_nd(self.dims, channels, channels, 1, padding=0))) def forward(self, x, hint, timesteps, context, y=None, **kwargs): - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(self.dtype) + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype) emb = self.time_embed(t_emb) guided_hint = self.input_hint_block(hint, emb, context) @@ -295,7 +295,7 @@ def forward(self, x, hint, timesteps, context, y=None, **kwargs): assert y.shape[0] == x.shape[0] emb = emb + self.label_emb(y) - h = x.type(self.dtype) + h = x for module, zero_conv in zip(self.input_blocks, self.zero_convs): if guided_hint is not None: h = module(h, emb, context) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 72fce10872f..58d0348028f 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -55,7 +55,10 @@ def __call__(self, parser, namespace, values, option_string=None): fp_group.add_argument("--force-fp32", action="store_true", help="Force fp32 (If this makes your GPU work better please report it).") fp_group.add_argument("--force-fp16", action="store_true", help="Force fp16.") -parser.add_argument("--bf16-unet", action="store_true", help="Run the UNET in bf16. This should only be used for testing stuff.") +fpunet_group = parser.add_mutually_exclusive_group() +fpunet_group.add_argument("--bf16-unet", action="store_true", help="Run the UNET in bf16. 
This should only be used for testing stuff.") +fpunet_group.add_argument("--fp8_e4m3fn-unet", action="store_true", help="Store unet weights in fp8_e4m3fn.") +fpunet_group.add_argument("--fp8_e5m2-unet", action="store_true", help="Store unet weights in fp8_e5m2.") fpvae_group = parser.add_mutually_exclusive_group() fpvae_group.add_argument("--fp16-vae", action="store_true", help="Run the VAE in fp16, might cause black images.") diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 6dd99afdc77..5921e6b1d19 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -1,6 +1,7 @@ import torch import math import os +import contextlib import comfy.utils import comfy.model_management import comfy.model_detection @@ -147,24 +148,31 @@ def get_control(self, x_noisy, t, cond, batched_number): else: return None + dtype = self.control_model.dtype + if comfy.model_management.supports_dtype(self.device, dtype): + precision_scope = lambda a: contextlib.nullcontext(a) + else: + precision_scope = torch.autocast + dtype = torch.float32 + output_dtype = x_noisy.dtype if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]: if self.cond_hint is not None: del self.cond_hint self.cond_hint = None - self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(self.control_model.dtype).to(self.device) + self.cond_hint = comfy.utils.common_upscale(self.cond_hint_original, x_noisy.shape[3] * 8, x_noisy.shape[2] * 8, 'nearest-exact', "center").to(dtype).to(self.device) if x_noisy.shape[0] != self.cond_hint.shape[0]: self.cond_hint = broadcast_image_to(self.cond_hint, x_noisy.shape[0], batched_number) - context = cond['c_crossattn'] y = cond.get('y', None) if y is not None: - y = y.to(self.control_model.dtype) + y = y.to(dtype) timestep = self.model_sampling_current.timestep(t) x_noisy = self.model_sampling_current.calculate_input(t, x_noisy) - control = self.control_model(x=x_noisy.to(self.control_model.dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(self.control_model.dtype), y=y) + with precision_scope(comfy.model_management.get_autocast_device(self.device)): + control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(dtype), y=y) return self.control_merge(None, control, control_prev, output_dtype) def copy(self): diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 855c3d1f4cd..12efd833c51 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -841,14 +841,14 @@ def forward(self, x, timesteps=None, context=None, y=None, control=None, transfo self.num_classes is not None ), "must specify y if and only if the model is class-conditional" hs = [] - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(self.dtype) + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype) emb = self.time_embed(t_emb) if self.num_classes is not None: assert y.shape[0] == x.shape[0] emb = emb + self.label_emb(y) - h = x.type(self.dtype) + h = x for id, module in enumerate(self.input_blocks): transformer_options["block"] = ("input", id) h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, 
image_only_indicator=image_only_indicator) diff --git a/comfy/model_base.py b/comfy/model_base.py index 253ea66673b..5bfcc391ded 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -5,6 +5,7 @@ import comfy.model_management import comfy.conds from enum import Enum +import contextlib from . import utils class ModelType(Enum): @@ -61,6 +62,13 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, trans context = c_crossattn dtype = self.get_dtype() + + if comfy.model_management.supports_dtype(xc.device, dtype): + precision_scope = lambda a: contextlib.nullcontext(a) + else: + precision_scope = torch.autocast + dtype = torch.float32 + xc = xc.to(dtype) t = self.model_sampling.timestep(t).float() context = context.to(dtype) @@ -70,7 +78,10 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, trans if hasattr(extra, "to"): extra = extra.to(dtype) extra_conds[o] = extra - model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() + + with precision_scope(comfy.model_management.get_autocast_device(xc.device)): + model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() + return self.model_sampling.calculate_denoised(sigma, model_output, x) def get_dtype(self): diff --git a/comfy/model_management.py b/comfy/model_management.py index d4acd8950ca..18d15f9d064 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -459,6 +459,10 @@ def unet_inital_load_device(parameters, dtype): def unet_dtype(device=None, model_params=0): if args.bf16_unet: return torch.bfloat16 + if args.fp8_e4m3fn_unet: + return torch.float8_e4m3fn + if args.fp8_e5m2_unet: + return torch.float8_e5m2 if should_use_fp16(device=device, model_params=model_params): return torch.float16 return torch.float32 @@ -515,6 +519,17 @@ def get_autocast_device(dev): return dev.type return "cuda" +def supports_dtype(device, dtype): #TODO + if dtype == torch.float32: + return True + if torch.device("cpu") == device: + return False + if dtype == torch.float16: + return True + if dtype == torch.bfloat16: + return True + return False + def cast_to_device(tensor, device, dtype, copy=False): device_supports_cast = False if tensor.dtype == torch.float32 or tensor.dtype == torch.float16: From ca82ade7652c80727b402f51a115feb5df4ad27a Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 4 Dec 2023 11:52:06 -0500 Subject: [PATCH 273/420] Use .itemsize to get dtype size for fp8. --- comfy/model_management.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 18d15f9d064..94d5969698d 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -430,6 +430,13 @@ def dtype_size(dtype): dtype_size = 4 if dtype == torch.float16 or dtype == torch.bfloat16: dtype_size = 2 + elif dtype == torch.float32: + dtype_size = 4 + else: + try: + dtype_size = dtype.itemsize + except: #Old pytorch doesn't have .itemsize + pass return dtype_size def unet_offload_device(): From be3468ddd5db871e3943003e0fd7a2219c7d02e6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 4 Dec 2023 12:49:00 -0500 Subject: [PATCH 274/420] Less useless downcasting. 
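The previous code cast the whole CLIP transformer down to the target dtype and then cast the embeddings back up to fp32, sending the embedding weights on a useless round trip through the lower precision. A minimal sketch of the pattern used instead, assuming a torch module with an `embeddings` submodule (the helper name is illustrative, not the actual comfy API):

    import torch

    def cast_keeping_embeddings_fp32(transformer, inner_model, dtype):
        # set the fp32 embeddings aside so .to(dtype) never touches them
        embeddings_bak = inner_model.embeddings.to(torch.float32)
        inner_model.embeddings = None
        transformer.to(dtype)  # downcast everything else in one pass
        inner_model.embeddings = embeddings_bak  # restore the untouched fp32 weights
        return transformer

The diff applies the same idea to both the `embeddings` attribute path and the `get_input_embeddings()` path.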
--- comfy/sd1_clip.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 58acb97fce7..4e9f6bffe01 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -84,12 +84,16 @@ def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_le self.inner_name = inner_name if dtype is not None: - self.transformer.to(dtype) inner_model = getattr(self.transformer, self.inner_name) if hasattr(inner_model, "embeddings"): - inner_model.embeddings.to(torch.float32) + embeddings_bak = inner_model.embeddings.to(torch.float32) + inner_model.embeddings = None + self.transformer.to(dtype) + inner_model.embeddings = embeddings_bak else: - self.transformer.set_input_embeddings(self.transformer.get_input_embeddings().to(torch.float32)) + previous_inputs = self.transformer.get_input_embeddings().to(torch.float32, copy=True) + self.transformer.to(dtype) + self.transformer.set_input_embeddings(previous_inputs) self.max_length = max_length if freeze: From 26b1c0a77150be2253f88e4cd106a11112d96d59 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 4 Dec 2023 13:47:41 -0500 Subject: [PATCH 275/420] Fix control lora on fp8. --- comfy/controlnet.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 5921e6b1d19..6d37aa74f69 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -208,7 +208,7 @@ def __init__(self, in_features: int, out_features: int, bias: bool = True, def forward(self, input): if self.up is not None: - return torch.nn.functional.linear(input, self.weight.to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias) + return torch.nn.functional.linear(input, self.weight.to(input.dtype).to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias) else: return torch.nn.functional.linear(input, self.weight.to(input.device), self.bias) @@ -247,7 +247,7 @@ def __init__( def forward(self, input): if self.up is not None: - return torch.nn.functional.conv2d(input, self.weight.to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) + return torch.nn.functional.conv2d(input, self.weight.to(input.dtype).to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) else: return torch.nn.functional.conv2d(input, self.weight.to(input.device), self.bias, self.stride, self.padding, self.dilation, self.groups) From 9b655d4fd72903d33af101177b0cb9576c5babd3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 4 Dec 2023 21:55:19 -0500 Subject: [PATCH 276/420] Fix memory issue with control loras. 
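Sampling runs on copies of the cond lists (positive_copy/negative_copy), but the cleanup pass was collecting control models from the original lists, so control models that only exist on the copies could be missed and stay loaded after sampling. A hedged illustration of the intent (the helper names come from the diff below; the wrapper itself is made up):

    def cleanup_controls(positive_copy, negative_copy):
        # gather control models from the cond lists that were actually sampled,
        # de-duplicated, and hand them to the model management cleanup
        models = set(get_models_from_cond(positive_copy, "control")
                     + get_models_from_cond(negative_copy, "control"))
        cleanup_additional_models(models)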
--- comfy/sample.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy/sample.py b/comfy/sample.py index 034db97ee88..bcbed3343d6 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -101,7 +101,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative samples = samples.cpu() cleanup_additional_models(models) - cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))) + cleanup_additional_models(set(get_models_from_cond(positive_copy, "control") + get_models_from_cond(negative_copy, "control"))) return samples def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=None, callback=None, disable_pbar=False, seed=None): @@ -113,6 +113,6 @@ def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent samples = comfy.samplers.sample(real_model, noise, positive_copy, negative_copy, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) samples = samples.cpu() cleanup_additional_models(models) - cleanup_additional_models(set(get_models_from_cond(positive, "control") + get_models_from_cond(negative, "control"))) + cleanup_additional_models(set(get_models_from_cond(positive_copy, "control") + get_models_from_cond(negative_copy, "control"))) return samples From 1bbd65ab307a5510af1b2e6145fad0b6c583fe6c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 5 Dec 2023 12:48:41 -0500 Subject: [PATCH 277/420] Missed this one. --- comfy/ldm/modules/attention.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index c2b85a6914b..d3348c4722c 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -384,7 +384,7 @@ def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff= self.is_res = inner_dim == dim if self.ff_in: - self.norm_in = nn.LayerNorm(dim, dtype=dtype, device=device) + self.norm_in = operations.LayerNorm(dim, dtype=dtype, device=device) self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations) self.disable_self_attn = disable_self_attn From 44265e081031a4647b295b32e7f6b77ab71c80c9 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 5 Dec 2023 20:27:13 +0000 Subject: [PATCH 278/420] Allow connecting primitivenode to reroutes --- web/extensions/core/rerouteNode.js | 56 ++++++++++++++++--- web/extensions/core/widgetInputs.js | 86 +++++++++++++++++++++-------- 2 files changed, 112 insertions(+), 30 deletions(-) diff --git a/web/extensions/core/rerouteNode.js b/web/extensions/core/rerouteNode.js index 499a171da16..cfa952f3c47 100644 --- a/web/extensions/core/rerouteNode.js +++ b/web/extensions/core/rerouteNode.js @@ -1,10 +1,11 @@ import { app } from "../../scripts/app.js"; +import { mergeIfValid, getWidgetConfig, setWidgetConfig } from "./widgetInputs.js"; // Node that allows you to redirect connections for cleaner graphs app.registerExtension({ name: "Comfy.RerouteNode", - registerCustomNodes() { + registerCustomNodes(app) { class RerouteNode { constructor() { if (!this.properties) { @@ -16,6 +17,12 @@ app.registerExtension({ this.addInput("", "*"); this.addOutput(this.properties.showOutputText ? 
"*" : "", "*"); + this.onAfterGraphConfigured = function () { + requestAnimationFrame(() => { + this.onConnectionsChange(LiteGraph.INPUT, null, true, null); + }); + }; + this.onConnectionsChange = function (type, index, connected, link_info) { this.applyOrientation(); @@ -54,8 +61,7 @@ app.registerExtension({ // We've found a circle currentNode.disconnectInput(link.target_slot); currentNode = null; - } - else { + } else { // Move the previous node currentNode = node; } @@ -94,8 +100,11 @@ app.registerExtension({ updateNodes.push(node); } else { // We've found an output - const nodeOutType = node.inputs && node.inputs[link?.target_slot] && node.inputs[link.target_slot].type ? node.inputs[link.target_slot].type : null; - if (inputType && nodeOutType !== inputType) { + const nodeOutType = + node.inputs && node.inputs[link?.target_slot] && node.inputs[link.target_slot].type + ? node.inputs[link.target_slot].type + : null; + if (inputType && inputType !== "*" && nodeOutType !== inputType) { // The output doesnt match our input so disconnect it node.disconnectInput(link.target_slot); } else { @@ -111,6 +120,9 @@ app.registerExtension({ const displayType = inputType || outputType || "*"; const color = LGraphCanvas.link_type_colors[displayType]; + let widgetConfig; + let targetWidget; + let widgetType; // Update the types of each node for (const node of updateNodes) { // If we dont have an input type we are always wildcard but we'll show the output type @@ -125,10 +137,38 @@ app.registerExtension({ const link = app.graph.links[l]; if (link) { link.color = color; + + if (app.configuringGraph) continue; + const targetNode = app.graph.getNodeById(link.target_id); + const targetInput = targetNode.inputs?.[link.target_slot]; + if (targetInput?.widget) { + const config = getWidgetConfig(targetInput); + if (!widgetConfig) { + widgetConfig = config[1] ?? {}; + widgetType = config[0]; + } + if (!targetWidget) { + targetWidget = targetNode.widgets?.find((w) => w.name === targetInput.widget.name); + } + + const merged = mergeIfValid(targetInput, [config[0], widgetConfig]); + if (merged.customConfig) { + widgetConfig = merged.customConfig; + } + } } } } + for (const node of updateNodes) { + if (widgetConfig && outputType) { + node.inputs[0].widget = { name: "value" }; + setWidgetConfig(node.inputs[0], [widgetType ?? displayType, widgetConfig], targetWidget); + } else { + setWidgetConfig(node.inputs[0], null); + } + } + if (inputNode) { const link = app.graph.links[inputNode.inputs[0].link]; if (link) { @@ -173,8 +213,8 @@ app.registerExtension({ }, { // naming is inverted with respect to LiteGraphNode.horizontal - // LiteGraphNode.horizontal == true means that - // each slot in the inputs and outputs are layed out horizontally, + // LiteGraphNode.horizontal == true means that + // each slot in the inputs and outputs are layed out horizontally, // which is the opposite of the visual orientation of the inputs and outputs as a node content: "Set " + (this.properties.horizontal ? 
"Horizontal" : "Vertical"), callback: () => { @@ -187,7 +227,7 @@ app.registerExtension({ applyOrientation() { this.horizontal = this.properties.horizontal; if (this.horizontal) { - // we correct the input position, because LiteGraphNode.horizontal + // we correct the input position, because LiteGraphNode.horizontal // doesn't account for title presence // which reroute nodes don't have this.inputs[0].pos = [this.size[0] / 2, 0]; diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index b6fa411f7e1..c33f7346af6 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -5,6 +5,11 @@ const CONVERTED_TYPE = "converted-widget"; const VALID_TYPES = ["STRING", "combo", "number", "BOOLEAN"]; const CONFIG = Symbol(); const GET_CONFIG = Symbol(); +const TARGET = Symbol(); // Used for reroutes to specify the real target widget + +export function getWidgetConfig(slot) { + return slot.widget[CONFIG] ?? slot.widget[GET_CONFIG](); +} function getConfig(widgetName) { const { nodeData } = this.constructor; @@ -100,7 +105,6 @@ function getWidgetType(config) { return { type }; } - function isValidCombo(combo, obj) { // New input isnt a combo if (!(obj instanceof Array)) { @@ -121,6 +125,31 @@ function isValidCombo(combo, obj) { return true; } +export function setWidgetConfig(slot, config, target) { + if (!slot.widget) return; + if (config) { + slot.widget[GET_CONFIG] = () => config; + slot.widget[TARGET] = target; + } else { + delete slot.widget; + } + + if (slot.link) { + const link = app.graph.links[slot.link]; + if (link) { + const originNode = app.graph.getNodeById(link.origin_id); + if (originNode.type === "PrimitiveNode") { + if (config) { + originNode.recreateWidget(); + } else if(!app.configuringGraph) { + originNode.disconnectOutput(0); + originNode.onLastDisconnect(); + } + } + } + } +} + export function mergeIfValid(output, config2, forceUpdate, recreateWidget, config1) { if (!config1) { config1 = output.widget[CONFIG] ?? 
output.widget[GET_CONFIG](); @@ -434,14 +463,20 @@ app.registerExtension({ for (const linkInfo of links) { const node = this.graph.getNodeById(linkInfo.target_id); const input = node.inputs[linkInfo.target_slot]; - const widgetName = input.widget.name; - if (widgetName) { - const widget = node.widgets.find((w) => w.name === widgetName); - if (widget) { - widget.value = this.widgets[0].value; - if (widget.callback) { - widget.callback(widget.value, app.canvas, node, app.canvas.graph_mouse, {}); - } + let widget; + if (input.widget[TARGET]) { + widget = input.widget[TARGET]; + } else { + const widgetName = input.widget.name; + if (widgetName) { + widget = node.widgets.find((w) => w.name === widgetName); + } + } + + if (widget) { + widget.value = this.widgets[0].value; + if (widget.callback) { + widget.callback(widget.value, app.canvas, node, app.canvas.graph_mouse, {}); } } } @@ -494,14 +529,13 @@ app.registerExtension({ this.#mergeWidgetConfig(); if (!links?.length) { - this.#onLastDisconnect(); + this.onLastDisconnect(); } } } onConnectOutput(slot, type, input, target_node, target_slot) { // Fires before the link is made allowing us to reject it if it isn't valid - // No widget, we cant connect if (!input.widget) { if (!(input.type in ComfyWidgets)) return false; @@ -519,6 +553,10 @@ app.registerExtension({ #onFirstConnection(recreating) { // First connection can fire before the graph is ready on initial load so random things can be missing + if (!this.outputs[0].links) { + this.onLastDisconnect(); + return; + } const linkId = this.outputs[0].links[0]; const link = this.graph.links[linkId]; if (!link) return; @@ -546,10 +584,10 @@ app.registerExtension({ this.outputs[0].name = type; this.outputs[0].widget = widget; - this.#createWidget(widget[CONFIG] ?? config, theirNode, widget.name, recreating); + this.#createWidget(widget[CONFIG] ?? 
config, theirNode, widget.name, recreating, widget[TARGET]); } - #createWidget(inputData, node, widgetName, recreating) { + #createWidget(inputData, node, widgetName, recreating, targetWidget) { let type = inputData[0]; if (type instanceof Array) { @@ -563,7 +601,9 @@ app.registerExtension({ widget = this.addWidget(type, "value", null, () => {}, {}); } - if (node?.widgets && widget) { + if (targetWidget) { + widget.value = targetWidget.value; + } else if (node?.widgets && widget) { const theirWidget = node.widgets.find((w) => w.name === widgetName); if (theirWidget) { widget.value = theirWidget.value; @@ -577,7 +617,7 @@ app.registerExtension({ } addValueControlWidgets(this, widget, control_value, undefined, inputData); let filter = this.widgets_values?.[2]; - if(filter && this.widgets.length === 3) { + if (filter && this.widgets.length === 3) { this.widgets[2].value = filter; } } @@ -610,12 +650,14 @@ app.registerExtension({ } } - #recreateWidget() { - const values = this.widgets.map((w) => w.value); + recreateWidget() { + const values = this.widgets?.map((w) => w.value); this.#removeWidgets(); this.#onFirstConnection(true); - for (let i = 0; i < this.widgets?.length; i++) this.widgets[i].value = values[i]; - return this.widgets[0]; + if (values?.length) { + for (let i = 0; i < this.widgets?.length; i++) this.widgets[i].value = values[i]; + } + return this.widgets?.[0]; } #mergeWidgetConfig() { @@ -631,7 +673,7 @@ app.registerExtension({ if (links?.length < 2 && hasConfig) { // Copy the widget options from the source if (links.length) { - this.#recreateWidget(); + this.recreateWidget(); } return; @@ -657,7 +699,7 @@ app.registerExtension({ // Only allow connections where the configs match const output = this.outputs[0]; const config2 = input.widget[GET_CONFIG](); - return !!mergeIfValid.call(this, output, config2, forceUpdate, this.#recreateWidget); + return !!mergeIfValid.call(this, output, config2, forceUpdate, this.recreateWidget); } #removeWidgets() { @@ -672,7 +714,7 @@ app.registerExtension({ } } - #onLastDisconnect() { + onLastDisconnect() { // We cant remove + re-add the output here as if you drag a link over the same link // it removes, then re-adds, causing it to break this.outputs[0].type = "*"; From a99da6667fadf4683ec24e44546cd5ce8f9e7aff Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 5 Dec 2023 20:28:05 +0000 Subject: [PATCH 279/420] reroute + primitive tests --- tests-ui/tests/widgetInputs.test.js | 174 +++++++++++++++++++++++++++- tests-ui/utils/ezgraph.js | 5 +- 2 files changed, 171 insertions(+), 8 deletions(-) diff --git a/tests-ui/tests/widgetInputs.test.js b/tests-ui/tests/widgetInputs.test.js index 8e191adf043..67e3fa341ec 100644 --- a/tests-ui/tests/widgetInputs.test.js +++ b/tests-ui/tests/widgetInputs.test.js @@ -1,7 +1,13 @@ // @ts-check /// -const { start, makeNodeDef, checkBeforeAndAfterReload, assertNotNullOrUndefined } = require("../utils"); +const { + start, + makeNodeDef, + checkBeforeAndAfterReload, + assertNotNullOrUndefined, + createDefaultWorkflow, +} = require("../utils"); const lg = require("../utils/litegraph"); /** @@ -36,7 +42,7 @@ async function connectPrimitiveAndReload(ez, graph, input, widgetType, controlWi if (controlWidgetCount) { const controlWidget = primitive.widgets.control_after_generate; expect(controlWidget.widget.type).toBe("combo"); - if(widgetType === "combo") { + if (widgetType === "combo") { const filterWidget = primitive.widgets.control_filter_list; 
expect(filterWidget.widget.type).toBe("string"); } @@ -308,8 +314,8 @@ describe("widget inputs", () => { const { ez } = await start({ mockNodeDefs: { ...makeNodeDef("TestNode1", {}, [["A", "B"]]), - ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true}] }), - ...makeNodeDef("TestNode3", { example: [["A", "B", "C"], { forceInput: true}] }), + ...makeNodeDef("TestNode2", { example: [["A", "B"], { forceInput: true }] }), + ...makeNodeDef("TestNode3", { example: [["A", "B", "C"], { forceInput: true }] }), }, }); @@ -330,7 +336,7 @@ describe("widget inputs", () => { const n1 = ez.TestNode1(); n1.widgets.example.convertToInput(); - const p = ez.PrimitiveNode() + const p = ez.PrimitiveNode(); p.outputs[0].connectTo(n1.inputs[0]); const value = p.widgets.value; @@ -380,7 +386,7 @@ describe("widget inputs", () => { // Check random control.value = "randomize"; filter.value = "/D/"; - for(let i = 0; i < 100; i++) { + for (let i = 0; i < 100; i++) { control["afterQueued"](); expect(value.value === "D" || value.value === "DD").toBeTruthy(); } @@ -392,4 +398,160 @@ describe("widget inputs", () => { control["afterQueued"](); expect(value.value).toBe("B"); }); + + describe("reroutes", () => { + async function checkOutput(graph, values) { + expect((await graph.toPrompt()).output).toStrictEqual({ + 1: { inputs: { ckpt_name: "model1.safetensors" }, class_type: "CheckpointLoaderSimple" }, + 2: { inputs: { text: "positive", clip: ["1", 1] }, class_type: "CLIPTextEncode" }, + 3: { inputs: { text: "negative", clip: ["1", 1] }, class_type: "CLIPTextEncode" }, + 4: { + inputs: { width: values.width ?? 512, height: values.height ?? 512, batch_size: values?.batch_size ?? 1 }, + class_type: "EmptyLatentImage", + }, + 5: { + inputs: { + seed: 0, + steps: 20, + cfg: 8, + sampler_name: "euler", + scheduler: values?.scheduler ?? "normal", + denoise: 1, + model: ["1", 0], + positive: ["2", 0], + negative: ["3", 0], + latent_image: ["4", 0], + }, + class_type: "KSampler", + }, + 6: { inputs: { samples: ["5", 0], vae: ["1", 2] }, class_type: "VAEDecode" }, + 7: { + inputs: { filename_prefix: values.filename_prefix ?? 
"ComfyUI", images: ["6", 0] }, + class_type: "SaveImage", + }, + }); + } + + async function waitForWidget(node) { + // widgets are created slightly after the graph is ready + // hard to find an exact hook to get these so just wait for them to be ready + for (let i = 0; i < 10; i++) { + await new Promise((r) => setTimeout(r, 10)); + if (node.widgets?.value) { + return; + } + } + } + + it("can connect primitive via a reroute path to a widget input", async () => { + const { ez, graph } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + nodes.empty.widgets.width.convertToInput(); + nodes.sampler.widgets.scheduler.convertToInput(); + nodes.save.widgets.filename_prefix.convertToInput(); + + let widthReroute = ez.Reroute(); + let schedulerReroute = ez.Reroute(); + let fileReroute = ez.Reroute(); + + let widthNext = widthReroute; + let schedulerNext = schedulerReroute; + let fileNext = fileReroute; + + for (let i = 0; i < 5; i++) { + let next = ez.Reroute(); + widthNext.outputs[0].connectTo(next.inputs[0]); + widthNext = next; + + next = ez.Reroute(); + schedulerNext.outputs[0].connectTo(next.inputs[0]); + schedulerNext = next; + + next = ez.Reroute(); + fileNext.outputs[0].connectTo(next.inputs[0]); + fileNext = next; + } + + widthNext.outputs[0].connectTo(nodes.empty.inputs.width); + schedulerNext.outputs[0].connectTo(nodes.sampler.inputs.scheduler); + fileNext.outputs[0].connectTo(nodes.save.inputs.filename_prefix); + + let widthPrimitive = ez.PrimitiveNode(); + let schedulerPrimitive = ez.PrimitiveNode(); + let filePrimitive = ez.PrimitiveNode(); + + widthPrimitive.outputs[0].connectTo(widthReroute.inputs[0]); + schedulerPrimitive.outputs[0].connectTo(schedulerReroute.inputs[0]); + filePrimitive.outputs[0].connectTo(fileReroute.inputs[0]); + expect(widthPrimitive.widgets.value.value).toBe(512); + widthPrimitive.widgets.value.value = 1024; + expect(schedulerPrimitive.widgets.value.value).toBe("normal"); + schedulerPrimitive.widgets.value.value = "simple"; + expect(filePrimitive.widgets.value.value).toBe("ComfyUI"); + filePrimitive.widgets.value.value = "ComfyTest"; + + await checkBeforeAndAfterReload(graph, async () => { + widthPrimitive = graph.find(widthPrimitive); + schedulerPrimitive = graph.find(schedulerPrimitive); + filePrimitive = graph.find(filePrimitive); + await waitForWidget(filePrimitive); + expect(widthPrimitive.widgets.length).toBe(2); + expect(schedulerPrimitive.widgets.length).toBe(3); + expect(filePrimitive.widgets.length).toBe(1); + + await checkOutput(graph, { + width: 1024, + scheduler: "simple", + filename_prefix: "ComfyTest", + }); + }); + }); + it("can connect primitive via a reroute path to multiple widget inputs", async () => { + const { ez, graph } = await start(); + const nodes = createDefaultWorkflow(ez, graph); + + nodes.empty.widgets.width.convertToInput(); + nodes.empty.widgets.height.convertToInput(); + nodes.empty.widgets.batch_size.convertToInput(); + + let reroute = ez.Reroute(); + let prevReroute = reroute; + for (let i = 0; i < 5; i++) { + const next = ez.Reroute(); + prevReroute.outputs[0].connectTo(next.inputs[0]); + prevReroute = next; + } + + const r1 = ez.Reroute(prevReroute.outputs[0]); + const r2 = ez.Reroute(prevReroute.outputs[0]); + const r3 = ez.Reroute(r2.outputs[0]); + const r4 = ez.Reroute(r2.outputs[0]); + + r1.outputs[0].connectTo(nodes.empty.inputs.width); + r3.outputs[0].connectTo(nodes.empty.inputs.height); + r4.outputs[0].connectTo(nodes.empty.inputs.batch_size); + + let primitive = ez.PrimitiveNode(); + 
primitive.outputs[0].connectTo(reroute.inputs[0]); + expect(primitive.widgets.value.value).toBe(1); + primitive.widgets.value.value = 64; + + await checkBeforeAndAfterReload(graph, async (r) => { + primitive = graph.find(primitive); + await waitForWidget(primitive); + + // Ensure widget configs are merged + expect(primitive.widgets.value.widget.options?.min).toBe(16); // width/height min + expect(primitive.widgets.value.widget.options?.max).toBe(4096); // batch max + expect(primitive.widgets.value.widget.options?.step).toBe(80); // width/height step * 10 + + await checkOutput(graph, { + width: 64, + height: 64, + batch_size: 64, + }); + }); + }); + }); }); diff --git a/tests-ui/utils/ezgraph.js b/tests-ui/utils/ezgraph.js index 898b82db051..3101aa29297 100644 --- a/tests-ui/utils/ezgraph.js +++ b/tests-ui/utils/ezgraph.js @@ -117,7 +117,7 @@ export class EzOutput extends EzSlot { const inp = input.input; const inName = inp.name || inp.label || inp.type; throw new Error( - `Connecting from ${input.node.node.type}[${inName}#${input.index}] -> ${this.node.node.type}[${ + `Connecting from ${input.node.node.type}#${input.node.id}[${inName}#${input.index}] -> ${this.node.node.type}#${this.node.id}[${ this.output.name ?? this.output.type }#${this.index}] failed.` ); @@ -179,6 +179,7 @@ export class EzWidget { set value(v) { this.widget.value = v; + this.widget.callback?.call?.(this.widget, v) } get isConvertedToInput() { @@ -319,7 +320,7 @@ export class EzGraph { } stringify() { - return JSON.stringify(this.app.graph.serialize(), undefined, "\t"); + return JSON.stringify(this.app.graph.serialize(), undefined); } /** From bcc469a2c95d40e0d64152d1531bc95d84fa98c5 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 5 Dec 2023 20:28:52 +0000 Subject: [PATCH 280/420] try to stop test failing --- tests-ui/tests/extensions.test.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests-ui/tests/extensions.test.js b/tests-ui/tests/extensions.test.js index b82e55c328b..159e5113a29 100644 --- a/tests-ui/tests/extensions.test.js +++ b/tests-ui/tests/extensions.test.js @@ -52,7 +52,7 @@ describe("extensions", () => { const nodeNames = Object.keys(defs); const nodeCount = nodeNames.length; expect(mockExtension.beforeRegisterNodeDef).toHaveBeenCalledTimes(nodeCount); - for (let i = 0; i < nodeCount; i++) { + for (let i = 0; i < 10; i++) { // It should be send the JS class and the original JSON definition const nodeClass = mockExtension.beforeRegisterNodeDef.mock.calls[i][0]; const nodeDef = mockExtension.beforeRegisterNodeDef.mock.calls[i][1]; @@ -133,7 +133,7 @@ describe("extensions", () => { expect(mockExtension.nodeCreated).toHaveBeenCalledTimes(graphData.nodes.length + 2); expect(mockExtension.loadedGraphNode).toHaveBeenCalledTimes(graphData.nodes.length + 1); expect(mockExtension.afterConfigureGraph).toHaveBeenCalledTimes(2); - }); + }, 15000); it("allows custom nodeDefs and widgets to be registered", async () => { const widgetMock = jest.fn((node, inputName, inputData, app) => { From 8de6f94f5cc5ee4ae690876108c1dd7705e59595 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 5 Dec 2023 21:02:10 +0000 Subject: [PATCH 281/420] Allow widget placeholder replacement on primitives --- web/extensions/core/saveImageExtraOutput.js | 73 ++------------------- web/extensions/core/widgetInputs.js | 13 +++- web/scripts/utils.js | 67 +++++++++++++++++++ 3 files changed, 83 insertions(+), 70 
deletions(-) create mode 100644 web/scripts/utils.js diff --git a/web/extensions/core/saveImageExtraOutput.js b/web/extensions/core/saveImageExtraOutput.js index 99e2213bfee..a0506b43b6b 100644 --- a/web/extensions/core/saveImageExtraOutput.js +++ b/web/extensions/core/saveImageExtraOutput.js @@ -1,5 +1,5 @@ import { app } from "../../scripts/app.js"; - +import { applyTextReplacements } from "../../scripts/utils.js"; // Use widget values and dates in output filenames app.registerExtension({ @@ -7,84 +7,19 @@ app.registerExtension({ async beforeRegisterNodeDef(nodeType, nodeData, app) { if (nodeData.name === "SaveImage") { const onNodeCreated = nodeType.prototype.onNodeCreated; - - // Simple date formatter - const parts = { - d: (d) => d.getDate(), - M: (d) => d.getMonth() + 1, - h: (d) => d.getHours(), - m: (d) => d.getMinutes(), - s: (d) => d.getSeconds(), - }; - const format = - Object.keys(parts) - .map((k) => k + k + "?") - .join("|") + "|yyy?y?"; - - function formatDate(text, date) { - return text.replace(new RegExp(format, "g"), function (text) { - if (text === "yy") return (date.getFullYear() + "").substring(2); - if (text === "yyyy") return date.getFullYear(); - if (text[0] in parts) { - const p = parts[text[0]](date); - return (p + "").padStart(text.length, "0"); - } - return text; - }); - } - - // When the SaveImage node is created we want to override the serialization of the output name widget to run our S&R + // When the SaveImage node is created we want to override the serialization of the output name widget to run our S&R nodeType.prototype.onNodeCreated = function () { const r = onNodeCreated ? onNodeCreated.apply(this, arguments) : undefined; const widget = this.widgets.find((w) => w.name === "filename_prefix"); widget.serializeValue = () => { - return widget.value.replace(/%([^%]+)%/g, function (match, text) { - const split = text.split("."); - if (split.length !== 2) { - // Special handling for dates - if (split[0].startsWith("date:")) { - return formatDate(split[0].substring(5), new Date()); - } - - if (text !== "width" && text !== "height") { - // Dont warn on standard replacements - console.warn("Invalid replacement pattern", text); - } - return match; - } - - // Find node with matching S&R property name - let nodes = app.graph._nodes.filter((n) => n.properties?.["Node name for S&R"] === split[0]); - // If we cant, see if there is a node with that title - if (!nodes.length) { - nodes = app.graph._nodes.filter((n) => n.title === split[0]); - } - if (!nodes.length) { - console.warn("Unable to find node", split[0]); - return match; - } - - if (nodes.length > 1) { - console.warn("Multiple nodes matched", split[0], "using first match"); - } - - const node = nodes[0]; - - const widget = node.widgets?.find((w) => w.name === split[1]); - if (!widget) { - console.warn("Unable to find widget", split[1], "on node", split[0], node); - return match; - } - - return ((widget.value ?? "") + "").replaceAll(/\/|\\/g, "_"); - }); + return applyTextReplacements(app, widget.value); }; return r; }; } else { - // When any other node is created add a property to alias the node + // When any other node is created add a property to alias the node const onNodeCreated = nodeType.prototype.onNodeCreated; nodeType.prototype.onNodeCreated = function () { const r = onNodeCreated ? 
onNodeCreated.apply(this, arguments) : undefined; diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index b6fa411f7e1..b8dd47d0e06 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -1,5 +1,6 @@ import { ComfyWidgets, addValueControlWidgets } from "../../scripts/widgets.js"; import { app } from "../../scripts/app.js"; +import { applyTextReplacements } from "../../scripts/utils.js"; const CONVERTED_TYPE = "converted-widget"; const VALID_TYPES = ["STRING", "combo", "number", "BOOLEAN"]; @@ -405,11 +406,16 @@ app.registerExtension({ }; }, registerCustomNodes() { + const replacePropertyName = "Run widget replace on values"; class PrimitiveNode { constructor() { this.addOutput("connect to widget input", "*"); this.serialize_widgets = true; this.isVirtualNode = true; + + if (!this.properties || !(replacePropertyName in this.properties)) { + this.addProperty(replacePropertyName, false, "boolean"); + } } applyToGraph(extraLinks = []) { @@ -430,6 +436,11 @@ app.registerExtension({ } let links = [...get_links(this).map((l) => app.graph.links[l]), ...extraLinks]; + let v = this.widgets?.[0].value; + if(v && this.properties[replacePropertyName]) { + v = applyTextReplacements(app, v); + } + // For each output link copy our value over the original widget value for (const linkInfo of links) { const node = this.graph.getNodeById(linkInfo.target_id); @@ -438,7 +449,7 @@ app.registerExtension({ if (widgetName) { const widget = node.widgets.find((w) => w.name === widgetName); if (widget) { - widget.value = this.widgets[0].value; + widget.value = v; if (widget.callback) { widget.callback(widget.value, app.canvas, node, app.canvas.graph_mouse, {}); } diff --git a/web/scripts/utils.js b/web/scripts/utils.js new file mode 100644 index 00000000000..401aca9e487 --- /dev/null +++ b/web/scripts/utils.js @@ -0,0 +1,67 @@ +// Simple date formatter +const parts = { + d: (d) => d.getDate(), + M: (d) => d.getMonth() + 1, + h: (d) => d.getHours(), + m: (d) => d.getMinutes(), + s: (d) => d.getSeconds(), +}; +const format = + Object.keys(parts) + .map((k) => k + k + "?") + .join("|") + "|yyy?y?"; + +function formatDate(text, date) { + return text.replace(new RegExp(format, "g"), function (text) { + if (text === "yy") return (date.getFullYear() + "").substring(2); + if (text === "yyyy") return date.getFullYear(); + if (text[0] in parts) { + const p = parts[text[0]](date); + return (p + "").padStart(text.length, "0"); + } + return text; + }); +} + +export function applyTextReplacements(app, value) { + return value.replace(/%([^%]+)%/g, function (match, text) { + const split = text.split("."); + if (split.length !== 2) { + // Special handling for dates + if (split[0].startsWith("date:")) { + return formatDate(split[0].substring(5), new Date()); + } + + if (text !== "width" && text !== "height") { + // Dont warn on standard replacements + console.warn("Invalid replacement pattern", text); + } + return match; + } + + // Find node with matching S&R property name + let nodes = app.graph._nodes.filter((n) => n.properties?.["Node name for S&R"] === split[0]); + // If we cant, see if there is a node with that title + if (!nodes.length) { + nodes = app.graph._nodes.filter((n) => n.title === split[0]); + } + if (!nodes.length) { + console.warn("Unable to find node", split[0]); + return match; + } + + if (nodes.length > 1) { + console.warn("Multiple nodes matched", split[0], "using first match"); + } + + const node = nodes[0]; + + const widget = 
node.widgets?.find((w) => w.name === split[1]); + if (!widget) { + console.warn("Unable to find widget", split[1], "on node", split[0], node); + return match; + } + + return ((widget.value ?? "") + "").replaceAll(/\/|\\/g, "_"); + }); +} From 8112a0d9fcb80c341afa53798f62acdf618cee2b Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Wed, 6 Dec 2023 15:56:03 +0900 Subject: [PATCH 282/420] improve: Mask Editor (#2171) * renewal mask editor * fix: ignoring keydown when 2nd open --- web/extensions/core/maskeditor.js | 356 +++++++++++++++++++++--------- 1 file changed, 251 insertions(+), 105 deletions(-) diff --git a/web/extensions/core/maskeditor.js b/web/extensions/core/maskeditor.js index 8ace79562e4..1ea4dbcaa5c 100644 --- a/web/extensions/core/maskeditor.js +++ b/web/extensions/core/maskeditor.js @@ -33,6 +33,18 @@ function loadedImageToBlob(image) { return blob; } +function loadImage(imagePath) { + return new Promise((resolve, reject) => { + const image = new Image(); + + image.onload = function() { + resolve(image); + }; + + image.src = imagePath; + }); +} + async function uploadMask(filepath, formData) { await api.fetchApi('/upload/mask', { method: 'POST', @@ -50,25 +62,25 @@ async function uploadMask(filepath, formData) { ClipspaceDialog.invalidatePreview(); } -function prepareRGB(image, backupCanvas, backupCtx) { +function prepare_mask(image, maskCanvas, maskCtx) { // paste mask data into alpha channel - backupCtx.drawImage(image, 0, 0, backupCanvas.width, backupCanvas.height); - const backupData = backupCtx.getImageData(0, 0, backupCanvas.width, backupCanvas.height); + maskCtx.drawImage(image, 0, 0, maskCanvas.width, maskCanvas.height); + const maskData = maskCtx.getImageData(0, 0, maskCanvas.width, maskCanvas.height); - // refine mask image - for (let i = 0; i < backupData.data.length; i += 4) { - if(backupData.data[i+3] == 255) - backupData.data[i+3] = 0; + // invert mask + for (let i = 0; i < maskData.data.length; i += 4) { + if(maskData.data[i+3] == 255) + maskData.data[i+3] = 0; else - backupData.data[i+3] = 255; + maskData.data[i+3] = 255; - backupData.data[i] = 0; - backupData.data[i+1] = 0; - backupData.data[i+2] = 0; + maskData.data[i] = 0; + maskData.data[i+1] = 0; + maskData.data[i+2] = 0; } - backupCtx.globalCompositeOperation = 'source-over'; - backupCtx.putImageData(backupData, 0, 0); + maskCtx.globalCompositeOperation = 'source-over'; + maskCtx.putImageData(maskData, 0, 0); } class MaskEditorDialog extends ComfyDialog { @@ -184,14 +196,13 @@ class MaskEditorDialog extends ComfyDialog { this.element.appendChild(bottom_panel); document.body.appendChild(brush); - var brush_size_slider = this.createLeftSlider(self, "Thickness", (event) => { + this.brush_size_slider = this.createLeftSlider(self, "Thickness", (event) => { self.brush_size = event.target.value; self.updateBrushPreview(self, null, null); }); var clearButton = this.createLeftButton("Clear", () => { self.maskCtx.clearRect(0, 0, self.maskCanvas.width, self.maskCanvas.height); - self.backupCtx.clearRect(0, 0, self.backupCanvas.width, self.backupCanvas.height); }); var cancelButton = this.createRightButton("Cancel", () => { document.removeEventListener("mouseup", MaskEditorDialog.handleMouseUp); @@ -213,34 +224,37 @@ class MaskEditorDialog extends ComfyDialog { bottom_panel.appendChild(clearButton); bottom_panel.appendChild(this.saveButton); bottom_panel.appendChild(cancelButton); - bottom_panel.appendChild(brush_size_slider); + 
bottom_panel.appendChild(this.brush_size_slider); + + imgCanvas.style.position = "absolute"; + maskCanvas.style.position = "absolute"; - imgCanvas.style.position = "relative"; imgCanvas.style.top = "200"; imgCanvas.style.left = "0"; - maskCanvas.style.position = "absolute"; + maskCanvas.style.top = imgCanvas.style.top; + maskCanvas.style.left = imgCanvas.style.left; } - show() { + async show() { + this.zoom_ratio = 1.0; + this.pan_x = 0; + this.pan_y = 0; + if(!this.is_layout_created) { // layout const imgCanvas = document.createElement('canvas'); const maskCanvas = document.createElement('canvas'); - const backupCanvas = document.createElement('canvas'); imgCanvas.id = "imageCanvas"; maskCanvas.id = "maskCanvas"; - backupCanvas.id = "backupCanvas"; this.setlayout(imgCanvas, maskCanvas); // prepare content this.imgCanvas = imgCanvas; this.maskCanvas = maskCanvas; - this.backupCanvas = backupCanvas; - this.maskCtx = maskCanvas.getContext('2d'); - this.backupCtx = backupCanvas.getContext('2d'); + this.maskCtx = maskCanvas.getContext('2d', {willReadFrequently: true }); this.setEventHandler(maskCanvas); @@ -252,6 +266,8 @@ class MaskEditorDialog extends ComfyDialog { mutations.forEach(function(mutation) { if (mutation.type === 'attributes' && mutation.attributeName === 'style') { if(self.last_display_style && self.last_display_style != 'none' && self.element.style.display == 'none') { + document.removeEventListener("mouseup", MaskEditorDialog.handleMouseUp); + self.brush.style.display = "none"; ComfyApp.onClipspaceEditorClosed(); } @@ -264,7 +280,8 @@ class MaskEditorDialog extends ComfyDialog { observer.observe(this.element, config); } - this.setImages(this.imgCanvas, this.backupCanvas); + // The keydown event needs to be reconfigured when closing the dialog as it gets removed. + document.addEventListener('keydown', MaskEditorDialog.handleKeyDown); if(ComfyApp.clipspace_return_node) { this.saveButton.innerText = "Save to node"; @@ -275,97 +292,157 @@ class MaskEditorDialog extends ComfyDialog { this.saveButton.disabled = false; this.element.style.display = "block"; + this.element.style.width = "85%"; + this.element.style.margin = "0 7.5%"; + this.element.style.height = "100vh"; + this.element.style.top = "50%"; + this.element.style.left = "42%"; this.element.style.zIndex = 8888; // NOTE: alert dialog must be high priority. 
+ + await this.setImages(this.imgCanvas); + + this.is_visible = true; } isOpened() { return this.element.style.display == "block"; } - setImages(imgCanvas, backupCanvas) { - const imgCtx = imgCanvas.getContext('2d'); - const backupCtx = backupCanvas.getContext('2d'); + invalidateCanvas(orig_image, mask_image) { + this.imgCanvas.width = orig_image.width; + this.imgCanvas.height = orig_image.height; + + this.maskCanvas.width = orig_image.width; + this.maskCanvas.height = orig_image.height; + + let imgCtx = this.imgCanvas.getContext('2d', {willReadFrequently: true }); + let maskCtx = this.maskCanvas.getContext('2d', {willReadFrequently: true }); + + imgCtx.drawImage(orig_image, 0, 0, orig_image.width, orig_image.height); + prepare_mask(mask_image, this.maskCanvas, maskCtx); + } + + async setImages(imgCanvas) { + let self = this; + + const imgCtx = imgCanvas.getContext('2d', {willReadFrequently: true }); const maskCtx = this.maskCtx; const maskCanvas = this.maskCanvas; - backupCtx.clearRect(0,0,this.backupCanvas.width,this.backupCanvas.height); imgCtx.clearRect(0,0,this.imgCanvas.width,this.imgCanvas.height); maskCtx.clearRect(0,0,this.maskCanvas.width,this.maskCanvas.height); // image load - const orig_image = new Image(); - window.addEventListener("resize", () => { - // repositioning - imgCanvas.width = window.innerWidth - 250; - imgCanvas.height = window.innerHeight - 200; - - // redraw image - let drawWidth = orig_image.width; - let drawHeight = orig_image.height; - if (orig_image.width > imgCanvas.width) { - drawWidth = imgCanvas.width; - drawHeight = (drawWidth / orig_image.width) * orig_image.height; - } - - if (drawHeight > imgCanvas.height) { - drawHeight = imgCanvas.height; - drawWidth = (drawHeight / orig_image.height) * orig_image.width; - } - - imgCtx.drawImage(orig_image, 0, 0, drawWidth, drawHeight); - - // update mask - maskCanvas.width = drawWidth; - maskCanvas.height = drawHeight; - maskCanvas.style.top = imgCanvas.offsetTop + "px"; - maskCanvas.style.left = imgCanvas.offsetLeft + "px"; - backupCtx.drawImage(maskCanvas, 0, 0, maskCanvas.width, maskCanvas.height, 0, 0, backupCanvas.width, backupCanvas.height); - maskCtx.drawImage(backupCanvas, 0, 0, backupCanvas.width, backupCanvas.height, 0, 0, maskCanvas.width, maskCanvas.height); - }); - const filepath = ComfyApp.clipspace.images; - const touched_image = new Image(); - - touched_image.onload = function() { - backupCanvas.width = touched_image.width; - backupCanvas.height = touched_image.height; - - prepareRGB(touched_image, backupCanvas, backupCtx); - }; - const alpha_url = new URL(ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src) alpha_url.searchParams.delete('channel'); alpha_url.searchParams.delete('preview'); alpha_url.searchParams.set('channel', 'a'); - touched_image.src = alpha_url; + let mask_image = await loadImage(alpha_url); // original image load - orig_image.onload = function() { - window.dispatchEvent(new Event('resize')); - }; - const rgb_url = new URL(ComfyApp.clipspace.imgs[ComfyApp.clipspace['selectedIndex']].src); rgb_url.searchParams.delete('channel'); rgb_url.searchParams.set('channel', 'rgb'); - orig_image.src = rgb_url; - this.image = orig_image; + this.image = new Image(); + this.image.onload = function() { + maskCanvas.width = self.image.width; + maskCanvas.height = self.image.height; + + self.invalidateCanvas(self.image, mask_image); + self.initializeCanvasPanZoom(); + }; + this.image.src = rgb_url; } - setEventHandler(maskCanvas) { - maskCanvas.addEventListener("contextmenu", 
(event) => { - event.preventDefault(); - }); + initializeCanvasPanZoom() { + // set initialize + let drawWidth = this.image.width; + let drawHeight = this.image.height; + + let width = this.element.clientWidth; + let height = this.element.clientHeight; + + if (this.image.width > width) { + drawWidth = width; + drawHeight = (drawWidth / this.image.width) * this.image.height; + } + + if (drawHeight > height) { + drawHeight = height; + drawWidth = (drawHeight / this.image.height) * this.image.width; + } + + this.zoom_ratio = drawWidth/this.image.width; + + const canvasX = (width - drawWidth) / 2; + const canvasY = (height - drawHeight) / 2; + this.pan_x = canvasX; + this.pan_y = canvasY; + + this.invalidatePanZoom(); + } + + + invalidatePanZoom() { + let raw_width = this.image.width * this.zoom_ratio; + let raw_height = this.image.height * this.zoom_ratio; + + if(this.pan_x + raw_width < 10) { + this.pan_x = 10 - raw_width; + } + + if(this.pan_y + raw_height < 10) { + this.pan_y = 10 - raw_height; + } + + let width = `${raw_width}px`; + let height = `${raw_height}px`; + let left = `${this.pan_x}px`; + let top = `${this.pan_y}px`; + + this.maskCanvas.style.width = width; + this.maskCanvas.style.height = height; + this.maskCanvas.style.left = left; + this.maskCanvas.style.top = top; + + this.imgCanvas.style.width = width; + this.imgCanvas.style.height = height; + this.imgCanvas.style.left = left; + this.imgCanvas.style.top = top; + } + + + setEventHandler(maskCanvas) { const self = this; - maskCanvas.addEventListener('wheel', (event) => this.handleWheelEvent(self,event)); - maskCanvas.addEventListener('pointerdown', (event) => this.handlePointerDown(self,event)); - document.addEventListener('pointerup', MaskEditorDialog.handlePointerUp); - maskCanvas.addEventListener('pointermove', (event) => this.draw_move(self,event)); - maskCanvas.addEventListener('touchmove', (event) => this.draw_move(self,event)); - maskCanvas.addEventListener('pointerover', (event) => { this.brush.style.display = "block"; }); - maskCanvas.addEventListener('pointerleave', (event) => { this.brush.style.display = "none"; }); - document.addEventListener('keydown', MaskEditorDialog.handleKeyDown); + + if(!this.handler_registered) { + maskCanvas.addEventListener("contextmenu", (event) => { + event.preventDefault(); + }); + + this.element.addEventListener('wheel', (event) => this.handleWheelEvent(self,event)); + this.element.addEventListener('pointermove', (event) => this.pointMoveEvent(self,event)); + this.element.addEventListener('touchmove', (event) => this.pointMoveEvent(self,event)); + + this.element.addEventListener('dragstart', (event) => { + if(event.ctrlKey) { + event.preventDefault(); + } + }); + + maskCanvas.addEventListener('pointerdown', (event) => this.handlePointerDown(self,event)); + maskCanvas.addEventListener('pointermove', (event) => this.draw_move(self,event)); + maskCanvas.addEventListener('touchmove', (event) => this.draw_move(self,event)); + maskCanvas.addEventListener('pointerover', (event) => { this.brush.style.display = "block"; }); + maskCanvas.addEventListener('pointerleave', (event) => { this.brush.style.display = "none"; }); + + document.addEventListener('pointerup', MaskEditorDialog.handlePointerUp); + + this.handler_registered = true; + } } brush_size = 10; @@ -378,8 +455,10 @@ class MaskEditorDialog extends ComfyDialog { const self = MaskEditorDialog.instance; if (event.key === ']') { self.brush_size = Math.min(self.brush_size+2, 100); + self.brush_slider_input.value = self.brush_size; } else if 
(event.key === '[') { self.brush_size = Math.max(self.brush_size-2, 1); + self.brush_slider_input.value = self.brush_size; } else if(event.key === 'Enter') { self.save(); } @@ -389,6 +468,10 @@ class MaskEditorDialog extends ComfyDialog { static handlePointerUp(event) { event.preventDefault(); + + this.mousedown_x = null; + this.mousedown_y = null; + MaskEditorDialog.instance.drawing_mode = false; } @@ -398,24 +481,70 @@ class MaskEditorDialog extends ComfyDialog { var centerX = self.cursorX; var centerY = self.cursorY; - brush.style.width = self.brush_size * 2 + "px"; - brush.style.height = self.brush_size * 2 + "px"; - brush.style.left = (centerX - self.brush_size) + "px"; - brush.style.top = (centerY - self.brush_size) + "px"; + brush.style.width = self.brush_size * 2 * this.zoom_ratio + "px"; + brush.style.height = self.brush_size * 2 * this.zoom_ratio + "px"; + brush.style.left = (centerX - self.brush_size * this.zoom_ratio) + "px"; + brush.style.top = (centerY - self.brush_size * this.zoom_ratio) + "px"; } handleWheelEvent(self, event) { - if(event.deltaY < 0) - self.brush_size = Math.min(self.brush_size+2, 100); - else - self.brush_size = Math.max(self.brush_size-2, 1); + event.preventDefault(); - self.brush_slider_input.value = self.brush_size; + if(event.ctrlKey) { + // zoom canvas + if(event.deltaY < 0) { + this.zoom_ratio = Math.min(10.0, this.zoom_ratio+0.2); + } + else { + this.zoom_ratio = Math.max(0.2, this.zoom_ratio-0.2); + } + + this.invalidatePanZoom(); + } + else { + // adjust brush size + if(event.deltaY < 0) + this.brush_size = Math.min(this.brush_size+2, 100); + else + this.brush_size = Math.max(this.brush_size-2, 1); + + this.brush_slider_input.value = this.brush_size; + + this.updateBrushPreview(this); + } + } + + pointMoveEvent(self, event) { + this.cursorX = event.pageX; + this.cursorY = event.pageY; self.updateBrushPreview(self); + + if(event.ctrlKey) { + event.preventDefault(); + self.pan_move(self, event); + } + } + + pan_move(self, event) { + if(event.buttons == 1) { + if(this.mousedown_x) { + let deltaX = this.mousedown_x - event.clientX; + let deltaY = this.mousedown_y - event.clientY; + + self.pan_x = this.mousedown_pan_x - deltaX; + self.pan_y = this.mousedown_pan_y - deltaY; + + self.invalidatePanZoom(); + } + } } draw_move(self, event) { + if(event.ctrlKey) { + return; + } + event.preventDefault(); this.cursorX = event.pageX; @@ -439,6 +568,9 @@ class MaskEditorDialog extends ComfyDialog { y = event.targetTouches[0].clientY - maskRect.top; } + x /= self.zoom_ratio; + y /= self.zoom_ratio; + var brush_size = this.brush_size; if(event instanceof PointerEvent && event.pointerType == 'pen') { brush_size *= event.pressure; @@ -489,8 +621,8 @@ class MaskEditorDialog extends ComfyDialog { } else if(event.buttons == 2 || event.buttons == 5 || event.buttons == 32) { const maskRect = self.maskCanvas.getBoundingClientRect(); - const x = event.offsetX || event.targetTouches[0].clientX - maskRect.left; - const y = event.offsetY || event.targetTouches[0].clientY - maskRect.top; + const x = (event.offsetX || event.targetTouches[0].clientX - maskRect.left) / self.zoom_ratio; + const y = (event.offsetY || event.targetTouches[0].clientY - maskRect.top) / self.zoom_ratio; var brush_size = this.brush_size; if(event instanceof PointerEvent && event.pointerType == 'pen') { @@ -540,6 +672,17 @@ class MaskEditorDialog extends ComfyDialog { } handlePointerDown(self, event) { + if(event.ctrlKey) { + if (event.buttons == 1) { + this.mousedown_x = event.clientX; + this.mousedown_y 
= event.clientY; + + this.mousedown_pan_x = this.pan_x; + this.mousedown_pan_y = this.pan_y; + } + return; + } + var brush_size = this.brush_size; if(event instanceof PointerEvent && event.pointerType == 'pen') { brush_size *= event.pressure; @@ -551,8 +694,8 @@ class MaskEditorDialog extends ComfyDialog { event.preventDefault(); const maskRect = self.maskCanvas.getBoundingClientRect(); - const x = event.offsetX || event.targetTouches[0].clientX - maskRect.left; - const y = event.offsetY || event.targetTouches[0].clientY - maskRect.top; + const x = (event.offsetX || event.targetTouches[0].clientX - maskRect.left) / self.zoom_ratio; + const y = (event.offsetY || event.targetTouches[0].clientY - maskRect.top) / self.zoom_ratio; self.maskCtx.beginPath(); if (event.button == 0) { @@ -570,15 +713,18 @@ class MaskEditorDialog extends ComfyDialog { } async save() { - const backupCtx = this.backupCanvas.getContext('2d', {willReadFrequently:true}); + const backupCanvas = document.createElement('canvas'); + const backupCtx = backupCanvas.getContext('2d', {willReadFrequently:true}); + backupCanvas.width = this.image.width; + backupCanvas.height = this.image.height; - backupCtx.clearRect(0,0,this.backupCanvas.width,this.backupCanvas.height); + backupCtx.clearRect(0,0, backupCanvas.width, backupCanvas.height); backupCtx.drawImage(this.maskCanvas, 0, 0, this.maskCanvas.width, this.maskCanvas.height, - 0, 0, this.backupCanvas.width, this.backupCanvas.height); + 0, 0, backupCanvas.width, backupCanvas.height); // paste mask data into alpha channel - const backupData = backupCtx.getImageData(0, 0, this.backupCanvas.width, this.backupCanvas.height); + const backupData = backupCtx.getImageData(0, 0, backupCanvas.width, backupCanvas.height); // refine mask image for (let i = 0; i < backupData.data.length; i += 4) { @@ -615,7 +761,7 @@ class MaskEditorDialog extends ComfyDialog { ComfyApp.clipspace.widgets[index].value = item; } - const dataURL = this.backupCanvas.toDataURL(); + const dataURL = backupCanvas.toDataURL(); const blob = dataURLToBlob(dataURL); let original_url = new URL(this.image.src); From 2db86b4676ed2b5c8551beea25dd2ef3fe3c4f66 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 6 Dec 2023 05:13:14 -0500 Subject: [PATCH 283/420] Slightly faster lora applying. 
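The gain comes from passing non_blocking=True to torch.Tensor.to(): host-to-device copies from pinned memory are queued on the CUDA stream without forcing a synchronization on every transfer, and for ordinary pageable tensors the flag is effectively a no-op, so it is safe to pass unconditionally. A rough sketch of the pattern (a simplified stand-in for the function changed below, not the exact code):

    import torch

    def cast_to_device_sketch(tensor, device, dtype):
        # queue the device transfer and the dtype cast without an implicit sync
        return tensor.to(device, non_blocking=True).to(dtype, non_blocking=True)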
--- comfy/model_management.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 94d5969698d..3588d350304 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -550,12 +550,12 @@ def cast_to_device(tensor, device, dtype, copy=False):
     if device_supports_cast:
         if copy:
             if tensor.device == device:
-                return tensor.to(dtype, copy=copy)
-            return tensor.to(device, copy=copy).to(dtype)
+                return tensor.to(dtype, copy=copy, non_blocking=True)
+            return tensor.to(device, copy=copy, non_blocking=True).to(dtype, non_blocking=True)
         else:
-            return tensor.to(device).to(dtype)
+            return tensor.to(device, non_blocking=True).to(dtype, non_blocking=True)
     else:
-        return tensor.to(dtype).to(device, copy=copy)
+        return tensor.to(device, dtype, copy=copy, non_blocking=True)

 def xformers_enabled():
     global directml_enabled

From 03eadbb53c82954ae5e42efa44903ed1319ff3d6 Mon Sep 17 00:00:00 2001
From: asagi4 <130366179+asagi4@users.noreply.github.com>
Date: Wed, 6 Dec 2023 21:12:49 +0200
Subject: [PATCH 284/420] Make HyperTile deterministic

--- comfy_extras/nodes_hypertile.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py
index 0d7d4c95483..15736b835bb 100644
--- a/comfy_extras/nodes_hypertile.py
+++ b/comfy_extras/nodes_hypertile.py
@@ -2,9 +2,10 @@
 import math

 from einops import rearrange
-import random
+# Use torch rng for consistency across generations
+from torch import randint

-def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int:
+def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
     min_value = min(min_value, value)

     # All big divisors of value (inclusive)
@@ -12,8 +13,7 @@ def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter
     ns = [value // i for i in divisors[:max_options]] # has at least 1 element

-    random.seed(counter)
-    idx = random.randint(0, len(ns) - 1)
+    idx = randint(low=0, high=len(ns) - 1, size=(1,)).item()

     return ns[idx]

@@ -42,7 +42,6 @@ def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
         latent_tile_size = max(32, tile_size) // 8
         self.temp = None
-        self.counter = 1

         def hypertile_in(q, k, v, extra_options):
             if q.shape[-1] in apply_to:
@@ -53,10 +52,8 @@ def hypertile_in(q, k, v, extra_options):
                 h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))

                 factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
-                nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter)
-                self.counter += 1
-                nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter)
-                self.counter += 1
+                nh = random_divisor(h, latent_tile_size * factor, swap_size)
+                nw = random_divisor(w, latent_tile_size * factor, swap_size)

                 if nh * nw > 1:
                     q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)

From fbdb14d4c4c3d2e783d585506c6b598487ec7a9d Mon Sep 17 00:00:00 2001
From: comfyanonymous
Date: Wed, 6 Dec 2023 15:55:09 -0500
Subject: [PATCH 285/420] Cleaner CLIP text encoder implementation. Use a simple CLIP model implementation instead of the one from transformers. This will allow some interesting things that would be too hackish to implement using the transformers implementation.
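One example of the flexibility this buys (a hypothetical usage sketch, not code from this patch): the encoder below accepts an intermediate_output index, so a caller can pull the hidden states of an arbitrary layer (the "CLIP skip" trick) from the same forward pass that produces the final and pooled outputs, assuming config_dict, dtype, device and operations are already set up:

    import torch

    # 49406/49407 are CLIP's start/end token ids, 77 the context length
    tokens = torch.tensor([[49406] + [49407] * 76], device=device)
    model = CLIPTextModel(config_dict, dtype, device, operations)
    last_hidden, skip_hidden, pooled = model(tokens, intermediate_output=-2)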
--- comfy/clip_model.py | 126 +++++++++++++++++++++++++++++++++ comfy/ldm/modules/attention.py | 23 ++++-- comfy/sd1_clip.py | 62 ++++++---------- comfy/sd2_clip.py | 6 +- comfy/sdxl_clip.py | 6 +- 5 files changed, 173 insertions(+), 50 deletions(-) create mode 100644 comfy/clip_model.py diff --git a/comfy/clip_model.py b/comfy/clip_model.py new file mode 100644 index 00000000000..e6a7bfa66f1 --- /dev/null +++ b/comfy/clip_model.py @@ -0,0 +1,126 @@ +import torch +from comfy.ldm.modules.attention import optimized_attention_for_device + +class CLIPAttention(torch.nn.Module): + def __init__(self, embed_dim, heads, dtype, device, operations): + super().__init__() + + self.heads = heads + self.q_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.k_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + self.v_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + + self.out_proj = operations.Linear(embed_dim, embed_dim, bias=True, dtype=dtype, device=device) + + def forward(self, x, mask=None, optimized_attention=None): + q = self.q_proj(x) + k = self.k_proj(x) + v = self.v_proj(x) + + out = optimized_attention(q, k, v, self.heads, mask) + return self.out_proj(out) + +ACTIVATIONS = {"quick_gelu": lambda a: a * torch.sigmoid(1.702 * a), + "gelu": torch.nn.functional.gelu, +} + +class CLIPMLP(torch.nn.Module): + def __init__(self, embed_dim, intermediate_size, activation, dtype, device, operations): + super().__init__() + self.fc1 = operations.Linear(embed_dim, intermediate_size, bias=True, dtype=dtype, device=device) + self.activation = ACTIVATIONS[activation] + self.fc2 = operations.Linear(intermediate_size, embed_dim, bias=True, dtype=dtype, device=device) + + def forward(self, x): + x = self.fc1(x) + x = self.activation(x) + x = self.fc2(x) + return x + +class CLIPLayer(torch.nn.Module): + def __init__(self, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations): + super().__init__() + self.layer_norm1 = operations.LayerNorm(embed_dim, dtype=dtype, device=device) + self.self_attn = CLIPAttention(embed_dim, heads, dtype, device, operations) + self.layer_norm2 = operations.LayerNorm(embed_dim, dtype=dtype, device=device) + self.mlp = CLIPMLP(embed_dim, intermediate_size, intermediate_activation, dtype, device, operations) + + def forward(self, x, mask=None, optimized_attention=None): + x += self.self_attn(self.layer_norm1(x), mask, optimized_attention) + x += self.mlp(self.layer_norm2(x)) + return x + + +class CLIPEncoder(torch.nn.Module): + def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations): + super().__init__() + self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)]) + + def forward(self, x, mask=None, intermediate_output=None): + optimized_attention = optimized_attention_for_device(x.device, mask=True) + causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1) + if mask is not None: + mask += causal_mask + else: + mask = causal_mask + + if intermediate_output is not None: + if intermediate_output < 0: + intermediate_output = len(self.layers) + intermediate_output + + intermediate = None + for i, l in enumerate(self.layers): + x = l(x, mask, optimized_attention) + if i == intermediate_output: + intermediate = x.clone() + 
return x, intermediate + +class CLIPEmbeddings(torch.nn.Module): + def __init__(self, embed_dim, vocab_size=49408, num_positions=77, dtype=None, device=None): + super().__init__() + self.token_embedding = torch.nn.Embedding(vocab_size, embed_dim, dtype=dtype, device=device) + self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device) + + def forward(self, input_tokens): + return self.token_embedding(input_tokens) + self.position_embedding.weight + + +class CLIPTextModel_(torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + num_layers = config_dict["num_hidden_layers"] + embed_dim = config_dict["hidden_size"] + heads = config_dict["num_attention_heads"] + intermediate_size = config_dict["intermediate_size"] + intermediate_activation = config_dict["hidden_act"] + + super().__init__() + self.embeddings = CLIPEmbeddings(embed_dim, dtype=torch.float32, device=device) + self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) + self.final_layer_norm = operations.LayerNorm(embed_dim, dtype=dtype, device=device) + + def forward(self, input_tokens, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True): + x = self.embeddings(input_tokens) + #TODO: attention_mask + x, i = self.encoder(x, intermediate_output=intermediate_output) + x = self.final_layer_norm(x) + if i is not None and final_layer_norm_intermediate: + i = self.final_layer_norm(i) + + pooled_output = x[torch.arange(x.shape[0], device=x.device), input_tokens.to(dtype=torch.int, device=x.device).argmax(dim=-1),] + return x, i, pooled_output + +class CLIPTextModel(torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + super().__init__() + self.num_layers = config_dict["num_hidden_layers"] + self.text_model = CLIPTextModel_(config_dict, dtype, device, operations) + self.dtype = dtype + + def get_input_embeddings(self): + return self.text_model.embeddings.token_embedding + + def set_input_embeddings(self, embeddings): + self.text_model.embeddings.token_embedding = embeddings + + def forward(self, *args, **kwargs): + return self.text_model(*args, **kwargs) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index d3348c4722c..8299b1d94bb 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -112,10 +112,13 @@ def attention_basic(q, k, v, heads, mask=None): del q, k if exists(mask): - mask = rearrange(mask, 'b ... -> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) + if mask.dtype == torch.bool: + mask = rearrange(mask, 'b ... 
-> b (...)') #TODO: check if this bool part matches pytorch attention + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + else: + sim += mask # attention, what we cannot get enough of sim = sim.softmax(dim=-1) @@ -340,6 +343,18 @@ def attention_pytorch(q, k, v, heads, mask=None): if model_management.pytorch_attention_enabled(): optimized_attention_masked = attention_pytorch +def optimized_attention_for_device(device, mask=False): + if device == torch.device("cpu"): #TODO + if model_management.pytorch_attention_enabled(): + return attention_pytorch + else: + return attention_basic + if mask: + return optimized_attention_masked + + return optimized_attention + + class CrossAttention(nn.Module): def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): super().__init__() diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 4e9f6bffe01..1acd972c435 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -1,12 +1,14 @@ import os -from transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig, modeling_utils +from transformers import CLIPTokenizer import comfy.ops import torch import traceback import zipfile from . import model_management import contextlib +import comfy.clip_model +import json def gen_empty_tokens(special_tokens, length): start_token = special_tokens.get("start", None) @@ -65,35 +67,19 @@ class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): "hidden" ] def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_length=77, - freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, textmodel_path=None, dtype=None, - special_tokens={"start": 49406, "end": 49407, "pad": 49407},layer_norm_hidden_state=True, config_class=CLIPTextConfig, - model_class=CLIPTextModel, inner_name="text_model"): # clip-vit-base-patch32 + freeze=True, layer="last", layer_idx=None, textmodel_json_config=None, dtype=None, model_class=comfy.clip_model.CLIPTextModel, + special_tokens={"start": 49406, "end": 49407, "pad": 49407}, layer_norm_hidden_state=True): # clip-vit-base-patch32 super().__init__() assert layer in self.LAYERS - self.num_layers = 12 - if textmodel_path is not None: - self.transformer = model_class.from_pretrained(textmodel_path) - else: - if textmodel_json_config is None: - textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json") - config = config_class.from_json_file(textmodel_json_config) - self.num_layers = config.num_hidden_layers - with comfy.ops.use_comfy_ops(device, dtype): - with modeling_utils.no_init_weights(): - self.transformer = model_class(config) - - self.inner_name = inner_name - if dtype is not None: - inner_model = getattr(self.transformer, self.inner_name) - if hasattr(inner_model, "embeddings"): - embeddings_bak = inner_model.embeddings.to(torch.float32) - inner_model.embeddings = None - self.transformer.to(dtype) - inner_model.embeddings = embeddings_bak - else: - previous_inputs = self.transformer.get_input_embeddings().to(torch.float32, copy=True) - self.transformer.to(dtype) - self.transformer.set_input_embeddings(previous_inputs) + + if textmodel_json_config is None: + textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_clip_config.json") + + with open(textmodel_json_config) as f: + config = json.load(f) + + self.transformer = model_class(config, dtype, device, comfy.ops) + 
self.num_layers = self.transformer.num_layers self.max_length = max_length if freeze: @@ -108,7 +94,7 @@ def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_le self.layer_norm_hidden_state = layer_norm_hidden_state if layer == "hidden": assert layer_idx is not None - assert abs(layer_idx) <= self.num_layers + assert abs(layer_idx) < self.num_layers self.clip_layer(layer_idx) self.layer_default = (self.layer, self.layer_idx) @@ -119,7 +105,7 @@ def freeze(self): param.requires_grad = False def clip_layer(self, layer_idx): - if abs(layer_idx) >= self.num_layers: + if abs(layer_idx) > self.num_layers: self.layer = "last" else: self.layer = "hidden" @@ -174,7 +160,7 @@ def forward(self, tokens): tokens = self.set_up_textual_embeddings(tokens, backup_embeds) tokens = torch.LongTensor(tokens).to(device) - if getattr(self.transformer, self.inner_name).final_layer_norm.weight.dtype != torch.float32: + if self.transformer.dtype != torch.float32: precision_scope = torch.autocast else: precision_scope = lambda a, dtype: contextlib.nullcontext(a) @@ -190,20 +176,16 @@ def forward(self, tokens): if tokens[x, y] == max_token: break - outputs = self.transformer(input_ids=tokens, attention_mask=attention_mask, output_hidden_states=self.layer=="hidden") + outputs = self.transformer(tokens, attention_mask, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state) self.transformer.set_input_embeddings(backup_embeds) if self.layer == "last": - z = outputs.last_hidden_state - elif self.layer == "pooled": - z = outputs.pooler_output[:, None, :] + z = outputs[0] else: - z = outputs.hidden_states[self.layer_idx] - if self.layer_norm_hidden_state: - z = getattr(self.transformer, self.inner_name).final_layer_norm(z) + z = outputs[1] - if hasattr(outputs, "pooler_output"): - pooled_output = outputs.pooler_output.float() + if outputs[2] is not None: + pooled_output = outputs[2].float() else: pooled_output = None diff --git a/comfy/sd2_clip.py b/comfy/sd2_clip.py index 2ee0ca05586..9c878d54ab6 100644 --- a/comfy/sd2_clip.py +++ b/comfy/sd2_clip.py @@ -3,13 +3,13 @@ import os class SD2ClipHModel(sd1_clip.SDClipModel): - def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): + def __init__(self, arch="ViT-H-14", device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, dtype=None): if layer == "penultimate": layer="hidden" - layer_idx=23 + layer_idx=-2 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd2_clip_config.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}) + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}) class SD2ClipHTokenizer(sd1_clip.SDTokenizer): def __init__(self, tokenizer_path=None, embedding_directory=None): diff --git a/comfy/sdxl_clip.py b/comfy/sdxl_clip.py index 673399e2222..b35056bb9d6 100644 --- a/comfy/sdxl_clip.py +++ b/comfy/sdxl_clip.py @@ -3,13 +3,13 @@ import os class SDXLClipG(sd1_clip.SDClipModel): - def __init__(self, device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, textmodel_path=None, dtype=None): + def __init__(self, 
device="cpu", max_length=77, freeze=True, layer="penultimate", layer_idx=None, dtype=None): if layer == "penultimate": layer="hidden" layer_idx=-2 textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "clip_config_bigg.json") - super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype, + super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, dtype=dtype, special_tokens={"start": 49406, "end": 49407, "pad": 0}, layer_norm_hidden_state=False) def load_sd(self, sd): @@ -37,7 +37,7 @@ def untokenize(self, token_weight_pair): class SDXLClipModel(torch.nn.Module): def __init__(self, device="cpu", dtype=None): super().__init__() - self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=11, device=device, dtype=dtype, layer_norm_hidden_state=False) + self.clip_l = sd1_clip.SDClipModel(layer="hidden", layer_idx=-2, device=device, dtype=dtype, layer_norm_hidden_state=False) self.clip_g = SDXLClipG(device=device, dtype=dtype) def clip_layer(self, layer_idx): From efb704c758f916bdf3b8fcaa3c2ade69d03a27f8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 7 Dec 2023 02:51:02 -0500 Subject: [PATCH 286/420] Support attention masking in CLIP implementation. --- comfy/clip_model.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/comfy/clip_model.py b/comfy/clip_model.py index e6a7bfa66f1..c61353dcf7a 100644 --- a/comfy/clip_model.py +++ b/comfy/clip_model.py @@ -100,8 +100,12 @@ def __init__(self, config_dict, dtype, device, operations): def forward(self, input_tokens, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True): x = self.embeddings(input_tokens) - #TODO: attention_mask - x, i = self.encoder(x, intermediate_output=intermediate_output) + mask = None + if attention_mask is not None: + mask = 1.0 - attention_mask.to(x.dtype).unsqueeze(1).unsqueeze(1).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]) + mask = mask.masked_fill(mask.to(torch.bool), float("-inf")) + + x, i = self.encoder(x, mask=mask, intermediate_output=intermediate_output) x = self.final_layer_norm(x) if i is not None and final_layer_norm_intermediate: i = self.final_layer_norm(i) From cdff08102346f34b6d5bbe65f036a6731e685285 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 7 Dec 2023 15:22:35 -0500 Subject: [PATCH 287/420] Fix hypertile. --- comfy_extras/nodes_hypertile.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py index 15736b835bb..e7446b2e540 100644 --- a/comfy_extras/nodes_hypertile.py +++ b/comfy_extras/nodes_hypertile.py @@ -13,7 +13,10 @@ def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int: ns = [value // i for i in divisors[:max_options]] # has at least 1 element - idx = randint(low=0, high=len(ns) - 1, size=(1,)).item() + if len(ns) - 1 > 0: + idx = randint(low=0, high=len(ns) - 1, size=(1,)).item() + else: + idx = 0 return ns[idx] From 9ac0b487acf569ebe8a2d87ed750fed58b59262d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 8 Dec 2023 02:35:45 -0500 Subject: [PATCH 288/420] Make --gpu-only put intermediate values in GPU memory instead of cpu. 
--- comfy/clip_vision.py | 4 ++-- comfy/model_management.py | 6 ++++++ comfy/sample.py | 4 ++-- comfy/sd.py | 23 ++++++++++++----------- comfy/sd1_clip.py | 6 +++--- comfy/utils.py | 12 ++++++------ comfy_extras/nodes_canny.py | 2 +- comfy_extras/nodes_post_processing.py | 2 +- nodes.py | 6 +++--- 9 files changed, 36 insertions(+), 29 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 9e2e03d7238..449be8e447a 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -54,10 +54,10 @@ def encode_image(self, image): t = outputs[k] if t is not None: if k == 'hidden_states': - outputs["penultimate_hidden_states"] = t[-2].cpu() + outputs["penultimate_hidden_states"] = t[-2].to(comfy.model_management.intermediate_device()) outputs["hidden_states"] = None else: - outputs[k] = t.cpu() + outputs[k] = t.to(comfy.model_management.intermediate_device()) return outputs diff --git a/comfy/model_management.py b/comfy/model_management.py index 3588d350304..ef9bec54563 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -508,6 +508,12 @@ def text_encoder_dtype(device=None): else: return torch.float32 +def intermediate_device(): + if args.gpu_only: + return get_torch_device() + else: + return torch.device("cpu") + def vae_device(): return get_torch_device() diff --git a/comfy/sample.py b/comfy/sample.py index bcbed3343d6..eadd6dcc864 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -98,7 +98,7 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative sampler = comfy.samplers.KSampler(real_model, steps=steps, device=model.load_device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options) samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed) - samples = samples.cpu() + samples = samples.to(comfy.model_management.intermediate_device()) cleanup_additional_models(models) cleanup_additional_models(set(get_models_from_cond(positive_copy, "control") + get_models_from_cond(negative_copy, "control"))) @@ -111,7 +111,7 @@ def sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent sigmas = sigmas.to(model.load_device) samples = comfy.samplers.sample(real_model, noise, positive_copy, negative_copy, cfg, model.load_device, sampler, sigmas, model_options=model.model_options, latent_image=latent_image, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) - samples = samples.cpu() + samples = samples.to(comfy.model_management.intermediate_device()) cleanup_additional_models(models) cleanup_additional_models(set(get_models_from_cond(positive_copy, "control") + get_models_from_cond(negative_copy, "control"))) return samples diff --git a/comfy/sd.py b/comfy/sd.py index f4f84d0a032..43e201d363b 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -190,6 +190,7 @@ def __init__(self, sd=None, device=None, config=None): offload_device = model_management.vae_offload_device() self.vae_dtype = model_management.vae_dtype() self.first_stage_model.to(self.vae_dtype) + self.output_device = model_management.intermediate_device() self.patcher = comfy.model_patcher.ModelPatcher(self.first_stage_model, load_device=self.device, offload_device=offload_device) @@ -201,9 +202,9 @@ def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16): decode_fn = 
lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float() output = torch.clamp(( - (comfy.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, pbar = pbar) + - comfy.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, pbar = pbar) + - comfy.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, pbar = pbar)) + (comfy.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) + + comfy.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) + + comfy.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar)) / 3.0) / 2.0, min=0.0, max=1.0) return output @@ -214,9 +215,9 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): pbar = comfy.utils.ProgressBar(steps) encode_fn = lambda a: self.first_stage_model.encode((2. * a - 1.).to(self.vae_dtype).to(self.device)).float() - samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) - samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) - samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, pbar=pbar) + samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) + samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) + samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) samples /= 3.0 return samples @@ -228,15 +229,15 @@ def decode(self, samples_in): batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) - pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device="cpu") + pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device=self.output_device) for x in range(0, samples_in.shape[0], batch_number): samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device) - pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).cpu().float() + 1.0) / 2.0, min=0.0, max=1.0) + pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).to(self.output_device).float() + 1.0) / 2.0, min=0.0, max=1.0) except model_management.OOM_EXCEPTION as e: print("Warning: Ran out of memory when regular VAE decoding, retrying with tiled VAE decoding.") pixel_samples = self.decode_tiled_(samples_in) - pixel_samples = pixel_samples.cpu().movedim(1,-1) + pixel_samples = pixel_samples.to(self.output_device).movedim(1,-1) return pixel_samples def decode_tiled(self, samples, tile_x=64, tile_y=64, overlap = 16): @@ -252,10 +253,10 @@ def encode(self, pixel_samples): free_memory = model_management.get_free_memory(self.device) batch_number = 
int(free_memory / memory_used) batch_number = max(1, batch_number) - samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device="cpu") + samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device=self.output_device) for x in range(0, pixel_samples.shape[0], batch_number): pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device) - samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).cpu().float() + samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float() except model_management.OOM_EXCEPTION as e: print("Warning: Ran out of memory when regular VAE encoding, retrying with tiled VAE encoding.") diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 1acd972c435..4530168ab7a 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -39,7 +39,7 @@ def encode_token_weights(self, token_weight_pairs): out, pooled = self.encode(to_encode) if pooled is not None: - first_pooled = pooled[0:1].cpu() + first_pooled = pooled[0:1].to(model_management.intermediate_device()) else: first_pooled = pooled @@ -56,8 +56,8 @@ def encode_token_weights(self, token_weight_pairs): output.append(z) if (len(output) == 0): - return out[-1:].cpu(), first_pooled - return torch.cat(output, dim=-2).cpu(), first_pooled + return out[-1:].to(model_management.intermediate_device()), first_pooled + return torch.cat(output, dim=-2).to(model_management.intermediate_device()), first_pooled class SDClipModel(torch.nn.Module, ClipTokenWeightEncoder): """Uses the CLIP transformer encoder for text (from huggingface)""" diff --git a/comfy/utils.py b/comfy/utils.py index 50557704736..f8026ddab9d 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -376,7 +376,7 @@ def lanczos(samples, width, height): images = [image.resize((width, height), resample=Image.Resampling.LANCZOS) for image in images] images = [torch.from_numpy(np.array(image).astype(np.float32) / 255.0).movedim(-1, 0) for image in images] result = torch.stack(images) - return result + return result.to(samples.device, samples.dtype) def common_upscale(samples, width, height, upscale_method, crop): if crop == "center": @@ -405,17 +405,17 @@ def get_tiled_scale_steps(width, height, tile_x, tile_y, overlap): return math.ceil((height / (tile_y - overlap))) * math.ceil((width / (tile_x - overlap))) @torch.inference_mode() -def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_amount = 4, out_channels = 3, pbar = None): - output = torch.empty((samples.shape[0], out_channels, round(samples.shape[2] * upscale_amount), round(samples.shape[3] * upscale_amount)), device="cpu") +def tiled_scale(samples, function, tile_x=64, tile_y=64, overlap = 8, upscale_amount = 4, out_channels = 3, output_device="cpu", pbar = None): + output = torch.empty((samples.shape[0], out_channels, round(samples.shape[2] * upscale_amount), round(samples.shape[3] * upscale_amount)), device=output_device) for b in range(samples.shape[0]): s = samples[b:b+1] - out = torch.zeros((s.shape[0], out_channels, round(s.shape[2] * upscale_amount), round(s.shape[3] * upscale_amount)), device="cpu") - out_div = torch.zeros((s.shape[0], out_channels, round(s.shape[2] * upscale_amount), round(s.shape[3] * upscale_amount)), device="cpu") + out = torch.zeros((s.shape[0], out_channels, round(s.shape[2] * upscale_amount), round(s.shape[3] * upscale_amount)), 
device=output_device) + out_div = torch.zeros((s.shape[0], out_channels, round(s.shape[2] * upscale_amount), round(s.shape[3] * upscale_amount)), device=output_device) for y in range(0, s.shape[2], tile_y - overlap): for x in range(0, s.shape[3], tile_x - overlap): s_in = s[:,:,y:y+tile_y,x:x+tile_x] - ps = function(s_in).cpu() + ps = function(s_in).to(output_device) mask = torch.ones_like(ps) feather = round(overlap * upscale_amount) for t in range(feather): diff --git a/comfy_extras/nodes_canny.py b/comfy_extras/nodes_canny.py index 94d453f2ca5..730dded5fd4 100644 --- a/comfy_extras/nodes_canny.py +++ b/comfy_extras/nodes_canny.py @@ -291,7 +291,7 @@ def INPUT_TYPES(s): def detect_edge(self, image, low_threshold, high_threshold): output = canny(image.to(comfy.model_management.get_torch_device()).movedim(-1, 1), low_threshold, high_threshold) - img_out = output[1].cpu().repeat(1, 3, 1, 1).movedim(1, -1) + img_out = output[1].to(comfy.model_management.intermediate_device()).repeat(1, 3, 1, 1).movedim(1, -1) return (img_out,) NODE_CLASS_MAPPINGS = { diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 12704f545d6..71660f8a525 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -226,7 +226,7 @@ def sharpen(self, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: batch_size, height, width, channels = image.shape kernel_size = sharpen_radius * 2 + 1 - kernel = gaussian_kernel(kernel_size, sigma) * -(alpha*10) + kernel = gaussian_kernel(kernel_size, sigma, device=image.device) * -(alpha*10) center = kernel_size // 2 kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0 kernel = kernel.repeat(channels, 1, 1).unsqueeze(1) diff --git a/nodes.py b/nodes.py index 24e591fdde8..db96e0e2d66 100644 --- a/nodes.py +++ b/nodes.py @@ -947,8 +947,8 @@ def append(self, conditioning_to, clip, gligen_textbox_model, text, width, heigh return (c, ) class EmptyLatentImage: - def __init__(self, device="cpu"): - self.device = device + def __init__(self): + self.device = comfy.model_management.intermediate_device() @classmethod def INPUT_TYPES(s): @@ -961,7 +961,7 @@ def INPUT_TYPES(s): CATEGORY = "latent" def generate(self, width, height, batch_size=1): - latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device) return ({"samples":latent}, ) From a4ec54a40d978c4249dc6a7e2d5133657d1fd109 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 8 Dec 2023 02:49:30 -0500 Subject: [PATCH 289/420] Add linear_start and linear_end to model_config.sampling_settings --- comfy/model_sampling.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/comfy/model_sampling.py b/comfy/model_sampling.py index 69c8b1f01fc..cc8745c1064 100644 --- a/comfy/model_sampling.py +++ b/comfy/model_sampling.py @@ -22,10 +22,17 @@ def calculate_denoised(self, sigma, model_output, model_input): class ModelSamplingDiscrete(torch.nn.Module): def __init__(self, model_config=None): super().__init__() - beta_schedule = "linear" + if model_config is not None: - beta_schedule = model_config.sampling_settings.get("beta_schedule", beta_schedule) - self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3) + sampling_settings = model_config.sampling_settings + else: + sampling_settings = {} + + beta_schedule = 
sampling_settings.get("beta_schedule", "linear") + linear_start = sampling_settings.get("linear_start", 0.00085) + linear_end = sampling_settings.get("linear_end", 0.012) + + self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=linear_start, linear_end=linear_end, cosine_s=8e-3) self.sigma_data = 1.0 def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, From 97015b6b383718bdc65cb617e3050069a156679d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 8 Dec 2023 16:02:08 -0500 Subject: [PATCH 290/420] Cleanup. --- comfy/samplers.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 1d012a514a7..ffc1fe3acb8 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -276,10 +276,7 @@ def forward(self, x, sigma, uncond, cond, cond_scale, denoise_mask, model_option x = x * denoise_mask + (self.latent_image + self.noise * sigma.reshape([sigma.shape[0]] + [1] * (len(self.noise.shape) - 1))) * latent_mask out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, model_options=model_options, seed=seed) if denoise_mask is not None: - out *= denoise_mask - - if denoise_mask is not None: - out += self.latent_image * latent_mask + out = out * denoise_mask + self.latent_image * latent_mask return out def simple_scheduler(model, steps): From 9aaf368a415d23cabd80ae30ba7e4bb918635b4a Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 9 Dec 2023 13:04:35 +0000 Subject: [PATCH 291/420] Fix internal reroutes connected to other groups --- tests-ui/tests/groupNode.test.js | 26 ++++++++++++++++++++++++++ web/extensions/core/groupNode.js | 4 ++++ 2 files changed, 30 insertions(+) diff --git a/tests-ui/tests/groupNode.test.js b/tests-ui/tests/groupNode.test.js index ce54c11542c..9bcb19e93be 100644 --- a/tests-ui/tests/groupNode.test.js +++ b/tests-ui/tests/groupNode.test.js @@ -383,6 +383,32 @@ describe("group node", () => { getOutput([nodes.pos.id, nodes.neg.id, nodes.empty.id, nodes.sampler.id]) ); }); + test("groups can connect to each other via internal reroutes", async () => { + const { ez, graph, app } = await start(); + + const latent = ez.EmptyLatentImage(); + const vae = ez.VAELoader(); + const latentReroute = ez.Reroute(); + const vaeReroute = ez.Reroute(); + + latent.outputs[0].connectTo(latentReroute.inputs[0]); + vae.outputs[0].connectTo(vaeReroute.inputs[0]); + + const group1 = await convertToGroup(app, graph, "test", [latentReroute, vaeReroute]); + group1.menu.Clone.call(); + expect(app.graph._nodes).toHaveLength(4); + const group2 = graph.find(app.graph._nodes[3]); + expect(group2.node.type).toEqual("workflow/test"); + expect(group2.id).not.toEqual(group1.id); + + group1.outputs.VAE.connectTo(group2.inputs.VAE); + group1.outputs.LATENT.connectTo(group2.inputs.LATENT); + + const decode = ez.VAEDecode(group2.outputs.LATENT, group2.outputs.VAE); + ez.PreviewImage(decode.outputs[0]); + + expect((await graph.toPrompt()).output).toEqual({}); + }); test("displays generated image on group node", async () => { const { ez, graph, app } = await start(); const nodes = createDefaultWorkflow(ez, graph); diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 6766f356d42..9a1d9b20760 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -602,6 +602,10 @@ export class GroupNodeHandler { innerNode = innerNode.getInputNode(0); } + 
if (l && GroupNodeHandler.isGroupNode(innerNode)) { + return innerNode.updateLink(l); + } + link.origin_id = innerNode.id; link.origin_slot = l?.origin_slot ?? output.slot; return link; From 080ef75c3148060bfccdf82f5f063e9a0cdacd0d Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 9 Dec 2023 13:19:21 +0000 Subject: [PATCH 292/420] fix --- tests-ui/tests/groupNode.test.js | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests-ui/tests/groupNode.test.js b/tests-ui/tests/groupNode.test.js index 9bcb19e93be..dc9d4bd49af 100644 --- a/tests-ui/tests/groupNode.test.js +++ b/tests-ui/tests/groupNode.test.js @@ -405,9 +405,14 @@ describe("group node", () => { group1.outputs.LATENT.connectTo(group2.inputs.LATENT); const decode = ez.VAEDecode(group2.outputs.LATENT, group2.outputs.VAE); - ez.PreviewImage(decode.outputs[0]); + const preview = ez.PreviewImage(decode.outputs[0]); - expect((await graph.toPrompt()).output).toEqual({}); + expect((await graph.toPrompt()).output).toEqual({ + [latent.id]: { inputs: { width: 512, height: 512, batch_size: 1 }, class_type: "EmptyLatentImage" }, + [vae.id]: { inputs: { vae_name: "vae1.safetensors" }, class_type: "VAELoader" }, + [decode.id]: { inputs: { samples: [latent.id + "", 0], vae: [vae.id + "", 0] }, class_type: "VAEDecode" }, + [preview.id]: { inputs: { images: [decode.id + "", 0] }, class_type: "PreviewImage" }, + }); }); test("displays generated image on group node", async () => { const { ez, graph, app } = await start(); From 174eba8e957b4b885d4d510d53dca859226ba9ef Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 9 Dec 2023 11:56:31 -0500 Subject: [PATCH 293/420] Use own clip vision model implementation. --- comfy/clip_model.py | 70 ++++++++++++++++++++++++++++++++++++++++---- comfy/clip_vision.py | 33 +++++++++++---------- 2 files changed, 81 insertions(+), 22 deletions(-) diff --git a/comfy/clip_model.py b/comfy/clip_model.py index c61353dcf7a..850b5fdbecb 100644 --- a/comfy/clip_model.py +++ b/comfy/clip_model.py @@ -57,12 +57,7 @@ def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)]) def forward(self, x, mask=None, intermediate_output=None): - optimized_attention = optimized_attention_for_device(x.device, mask=True) - causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1) - if mask is not None: - mask += causal_mask - else: - mask = causal_mask + optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None) if intermediate_output is not None: if intermediate_output < 0: @@ -105,6 +100,12 @@ def forward(self, input_tokens, attention_mask=None, intermediate_output=None, f mask = 1.0 - attention_mask.to(x.dtype).unsqueeze(1).unsqueeze(1).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]) mask = mask.masked_fill(mask.to(torch.bool), float("-inf")) + causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1) + if mask is not None: + mask += causal_mask + else: + mask = causal_mask + x, i = self.encoder(x, mask=mask, intermediate_output=intermediate_output) x = self.final_layer_norm(x) if i is not None and final_layer_norm_intermediate: @@ -128,3 +129,60 @@ def set_input_embeddings(self, embeddings): def 
forward(self, *args, **kwargs): return self.text_model(*args, **kwargs) + +class CLIPVisionEmbeddings(torch.nn.Module): + def __init__(self, embed_dim, num_channels=3, patch_size=14, image_size=224, dtype=None, device=None, operations=None): + super().__init__() + self.class_embedding = torch.nn.Parameter(torch.empty(embed_dim, dtype=dtype, device=device)) + + self.patch_embedding = operations.Conv2d( + in_channels=num_channels, + out_channels=embed_dim, + kernel_size=patch_size, + stride=patch_size, + bias=False, + dtype=dtype, + device=device + ) + + num_patches = (image_size // patch_size) ** 2 + num_positions = num_patches + 1 + self.position_embedding = torch.nn.Embedding(num_positions, embed_dim, dtype=dtype, device=device) + + def forward(self, pixel_values): + embeds = self.patch_embedding(pixel_values).flatten(2).transpose(1, 2) + return torch.cat([self.class_embedding.expand(pixel_values.shape[0], 1, -1), embeds], dim=1) + self.position_embedding.weight + + +class CLIPVision(torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + super().__init__() + num_layers = config_dict["num_hidden_layers"] + embed_dim = config_dict["hidden_size"] + heads = config_dict["num_attention_heads"] + intermediate_size = config_dict["intermediate_size"] + intermediate_activation = config_dict["hidden_act"] + + self.embeddings = CLIPVisionEmbeddings(embed_dim, config_dict["num_channels"], config_dict["patch_size"], config_dict["image_size"], dtype=torch.float32, device=device, operations=operations) + self.pre_layrnorm = operations.LayerNorm(embed_dim) + self.encoder = CLIPEncoder(num_layers, embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) + self.post_layernorm = operations.LayerNorm(embed_dim) + + def forward(self, pixel_values, attention_mask=None, intermediate_output=None): + x = self.embeddings(pixel_values) + x = self.pre_layrnorm(x) + #TODO: attention_mask? 
+ x, i = self.encoder(x, mask=None, intermediate_output=intermediate_output) + pooled_output = self.post_layernorm(x[:, 0, :]) + return x, i, pooled_output + +class CLIPVisionModelProjection(torch.nn.Module): + def __init__(self, config_dict, dtype, device, operations): + super().__init__() + self.vision_model = CLIPVision(config_dict, dtype, device, operations) + self.visual_projection = operations.Linear(config_dict["hidden_size"], config_dict["projection_dim"], bias=False) + + def forward(self, *args, **kwargs): + x = self.vision_model(*args, **kwargs) + out = self.visual_projection(x[2]) + return (x[0], x[1], out) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 449be8e447a..ae87c75b4d4 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -1,13 +1,20 @@ -from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, modeling_utils from .utils import load_torch_file, transformers_convert, common_upscale import os import torch import contextlib +import json import comfy.ops import comfy.model_patcher import comfy.model_management import comfy.utils +import comfy.clip_model + +class Output: + def __getitem__(self, key): + return getattr(self, key) + def __setitem__(self, key, item): + setattr(self, key, item) def clip_preprocess(image, size=224): mean = torch.tensor([ 0.48145466,0.4578275,0.40821073], device=image.device, dtype=image.dtype) @@ -22,17 +29,16 @@ def clip_preprocess(image, size=224): class ClipVisionModel(): def __init__(self, json_config): - config = CLIPVisionConfig.from_json_file(json_config) + with open(json_config) as f: + config = json.load(f) + self.load_device = comfy.model_management.text_encoder_device() offload_device = comfy.model_management.text_encoder_offload_device() self.dtype = torch.float32 if comfy.model_management.should_use_fp16(self.load_device, prioritize_performance=False): self.dtype = torch.float16 - with comfy.ops.use_comfy_ops(offload_device, self.dtype): - with modeling_utils.no_init_weights(): - self.model = CLIPVisionModelWithProjection(config) - self.model.to(self.dtype) + self.model = comfy.clip_model.CLIPVisionModelProjection(config, self.dtype, offload_device, comfy.ops) self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) def load_sd(self, sd): @@ -48,17 +54,12 @@ def encode_image(self, image): precision_scope = lambda a, b: contextlib.nullcontext(a) with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32): - outputs = self.model(pixel_values=pixel_values, output_hidden_states=True) - - for k in outputs: - t = outputs[k] - if t is not None: - if k == 'hidden_states': - outputs["penultimate_hidden_states"] = t[-2].to(comfy.model_management.intermediate_device()) - outputs["hidden_states"] = None - else: - outputs[k] = t.to(comfy.model_management.intermediate_device()) + out = self.model(pixel_values=pixel_values, intermediate_output=-2) + outputs = Output() + outputs["last_hidden_state"] = out[0].to(comfy.model_management.intermediate_device()) + outputs["image_embeds"] = out[2].to(comfy.model_management.intermediate_device()) + outputs["penultimate_hidden_states"] = out[1].to(comfy.model_management.intermediate_device()) return outputs def convert_to_transformers(sd, prefix): From da74e3bbe3705d6c3141db8c19f5217f51c6d4a7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 9 Dec 2023 12:01:17 -0500 Subject: [PATCH 294/420] Update pytorch nightly packaging workflow. 
--- ...update_comfyui_and_python_dependencies.bat | 3 -- .../windows_base_files/run_nvidia_gpu.bat | 2 -- .../windows_release_nightly_pytorch.yml | 33 +++++++++++++++---- 3 files changed, 26 insertions(+), 12 deletions(-) delete mode 100755 .ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat delete mode 100755 .ci/nightly/windows_base_files/run_nvidia_gpu.bat diff --git a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat b/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat deleted file mode 100755 index 94f5d1023d1..00000000000 --- a/.ci/nightly/update_windows/update_comfyui_and_python_dependencies.bat +++ /dev/null @@ -1,3 +0,0 @@ -..\python_embeded\python.exe .\update.py ..\ComfyUI\ -..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -r ../ComfyUI/requirements.txt pygit2 -pause diff --git a/.ci/nightly/windows_base_files/run_nvidia_gpu.bat b/.ci/nightly/windows_base_files/run_nvidia_gpu.bat deleted file mode 100755 index 8ee2f3402ff..00000000000 --- a/.ci/nightly/windows_base_files/run_nvidia_gpu.bat +++ /dev/null @@ -1,2 +0,0 @@ -.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --use-pytorch-cross-attention -pause diff --git a/.github/workflows/windows_release_nightly_pytorch.yml b/.github/workflows/windows_release_nightly_pytorch.yml index b793f7fe2b2..90e09d27a53 100644 --- a/.github/workflows/windows_release_nightly_pytorch.yml +++ b/.github/workflows/windows_release_nightly_pytorch.yml @@ -2,6 +2,24 @@ name: "Windows Release Nightly pytorch" on: workflow_dispatch: + inputs: + cu: + description: 'cuda version' + required: true + type: string + default: "121" + + python_minor: + description: 'python minor version' + required: true + type: string + default: "12" + + python_patch: + description: 'python patch version' + required: true + type: string + default: "1" # push: # branches: # - master @@ -20,21 +38,21 @@ jobs: persist-credentials: false - uses: actions/setup-python@v4 with: - python-version: '3.11.6' + python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }} - shell: bash run: | cd .. cp -r ComfyUI ComfyUI_copy - curl https://www.python.org/ftp/python/3.11.6/python-3.11.6-embed-amd64.zip -o python_embeded.zip + curl https://www.python.org/ftp/python/3.${{ inputs.python_minor }}.${{ inputs.python_patch }}/python-3.${{ inputs.python_minor }}.${{ inputs.python_patch }}-embed-amd64.zip -o python_embeded.zip unzip python_embeded.zip -d python_embeded cd python_embeded - echo 'import site' >> ./python311._pth + echo 'import site' >> ./python3${{ inputs.python_minor }}._pth curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py ./python.exe get-pip.py - python -m pip wheel torch torchvision torchaudio aiohttp==3.8.5 --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu121 -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir + python -m pip wheel torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2 -w ../temp_wheel_dir ls ../temp_wheel_dir ./python.exe -s -m pip install --pre ../temp_wheel_dir/* - sed -i '1i../ComfyUI' ./python311._pth + sed -i '1i../ComfyUI' ./python3${{ inputs.python_minor }}._pth cd .. 
git clone https://github.com/comfyanonymous/taesd @@ -49,9 +67,10 @@ jobs: mkdir update cp -r ComfyUI/.ci/update_windows/* ./update/ cp -r ComfyUI/.ci/windows_base_files/* ./ - cp -r ComfyUI/.ci/nightly/update_windows/* ./update/ - cp -r ComfyUI/.ci/nightly/windows_base_files/* ./ + echo "..\python_embeded\python.exe .\update.py ..\ComfyUI\\ + ..\python_embeded\python.exe -s -m pip install --upgrade --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/cu${{ inputs.cu }} -r ../ComfyUI/requirements.txt pygit2 + pause" > ./update/update_comfyui_and_python_dependencies.bat cd .. "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma -mx=8 -mfb=64 -md=32m -ms=on -mf=BCJ2 ComfyUI_windows_portable_nightly_pytorch.7z ComfyUI_windows_portable_nightly_pytorch From 9e411073e901f766118a7b82f613872fd745ecc2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 9 Dec 2023 13:41:30 -0500 Subject: [PATCH 295/420] Add instructions for those that have python 3.12 --- README.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 450a012bb8e..167214c05c6 100644 --- a/README.md +++ b/README.md @@ -93,23 +93,27 @@ Put your SD checkpoints (the huge ckpt/safetensors files) in: models/checkpoints Put your VAE in: models/vae -Note: pytorch does not support python 3.12 yet so make sure your python version is 3.11 or earlier. +Note: pytorch stable does not support python 3.12 yet. If you have python 3.12 you will have to use the nightly version of pytorch. If you run into issues you should try python 3.11 instead. ### AMD GPUs (Linux only) AMD users can install rocm and pytorch with pip if you don't have it already installed, this is the command to install the stable version: ```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm5.6``` -This is the command to install the nightly with ROCm 5.7 that might have some performance improvements: +This is the command to install the nightly with ROCm 5.7 which has a python 3.12 package and might have some performance improvements: ```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.7``` ### NVIDIA -Nvidia users should install pytorch using this command: +Nvidia users should install stable pytorch using this command: ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu121``` +This is the command to install pytorch nightly instead which has a python 3.12 package and might have performance improvements: + +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu121``` + #### Troubleshooting If you get the "Torch not compiled with CUDA enabled" error, uninstall torch with: From cb63e230b41193601e48778111eff045391cfbe2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 9 Dec 2023 14:15:09 -0500 Subject: [PATCH 296/420] Make lora code a bit cleaner. 
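
The cleanup replaces dispatch on tuple length with an explicit type tag:
every weight patch becomes a ("lora" | "loha" | "lokr" | "diff", data)
pair. A reduced sketch of the new dispatch covering only the diff and lora
cases, with alpha and mid handling omitted:

    import torch

    def apply_patch(weight, patch, strength=1.0):
        patch_type, v = patch
        if patch_type == "diff":
            # v = (delta,), same shape as the weight
            return weight + strength * v[0].to(weight.dtype)
        elif patch_type == "lora":
            # v = (up, down, alpha, mid)
            up = v[0].flatten(start_dim=1)
            down = v[1].flatten(start_dim=1)
            return weight + (strength * torch.mm(up, down)).reshape(weight.shape).to(weight.dtype)
        raise ValueError(f"patch type not recognized: {patch_type}")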
--- comfy/lora.py | 14 +++++++------- comfy/model_patcher.py | 14 +++++++++++--- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/comfy/lora.py b/comfy/lora.py index 29c59d89307..ecd518084a5 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -43,7 +43,7 @@ def load_lora(lora, to_load): if mid_name is not None and mid_name in lora.keys(): mid = lora[mid_name] loaded_keys.add(mid_name) - patch_dict[to_load[x]] = (lora[A_name], lora[B_name], alpha, mid) + patch_dict[to_load[x]] = ("lora", (lora[A_name], lora[B_name], alpha, mid)) loaded_keys.add(A_name) loaded_keys.add(B_name) @@ -64,7 +64,7 @@ def load_lora(lora, to_load): loaded_keys.add(hada_t1_name) loaded_keys.add(hada_t2_name) - patch_dict[to_load[x]] = (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2) + patch_dict[to_load[x]] = ("loha", (lora[hada_w1_a_name], lora[hada_w1_b_name], alpha, lora[hada_w2_a_name], lora[hada_w2_b_name], hada_t1, hada_t2)) loaded_keys.add(hada_w1_a_name) loaded_keys.add(hada_w1_b_name) loaded_keys.add(hada_w2_a_name) @@ -116,7 +116,7 @@ def load_lora(lora, to_load): loaded_keys.add(lokr_t2_name) if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None): - patch_dict[to_load[x]] = (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2) + patch_dict[to_load[x]] = ("lokr", (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)) w_norm_name = "{}.w_norm".format(x) @@ -126,21 +126,21 @@ def load_lora(lora, to_load): if w_norm is not None: loaded_keys.add(w_norm_name) - patch_dict[to_load[x]] = (w_norm,) + patch_dict[to_load[x]] = ("diff", (w_norm,)) if b_norm is not None: loaded_keys.add(b_norm_name) - patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (b_norm,) + patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = ("diff", (b_norm,)) diff_name = "{}.diff".format(x) diff_weight = lora.get(diff_name, None) if diff_weight is not None: - patch_dict[to_load[x]] = (diff_weight,) + patch_dict[to_load[x]] = ("diff", (diff_weight,)) loaded_keys.add(diff_name) diff_bias_name = "{}.diff_b".format(x) diff_bias = lora.get(diff_bias_name, None) if diff_bias is not None: - patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = (diff_bias,) + patch_dict["{}.bias".format(to_load[x][:-len(".weight")])] = ("diff", (diff_bias,)) loaded_keys.add(diff_bias_name) for x in lora.keys(): diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index a3cffc3be9d..d78cdfd4dfd 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -217,13 +217,19 @@ def calculate_weight(self, patches, weight, key): v = (self.calculate_weight(v[1:], v[0].clone(), key), ) if len(v) == 1: + patch_type = "diff" + elif len(v) == 2: + patch_type = v[0] + v = v[1] + + if patch_type == "diff": w1 = v[0] if alpha != 0.0: if w1.shape != weight.shape: print("WARNING SHAPE MISMATCH {} WEIGHT NOT MERGED {} != {}".format(key, w1.shape, weight.shape)) else: weight += alpha * comfy.model_management.cast_to_device(w1, weight.device, weight.dtype) - elif len(v) == 4: #lora/locon + elif patch_type == "lora": #lora/locon mat1 = comfy.model_management.cast_to_device(v[0], weight.device, torch.float32) mat2 = comfy.model_management.cast_to_device(v[1], weight.device, torch.float32) if v[2] is not None: @@ -237,7 +243,7 @@ def calculate_weight(self, patches, weight, key): weight += (alpha * torch.mm(mat1.flatten(start_dim=1), 
mat2.flatten(start_dim=1))).reshape(weight.shape).type(weight.dtype) except Exception as e: print("ERROR", key, e) - elif len(v) == 8: #lokr + elif patch_type == "lokr": w1 = v[0] w2 = v[1] w1_a = v[3] @@ -276,7 +282,7 @@ def calculate_weight(self, patches, weight, key): weight += alpha * torch.kron(w1, w2).reshape(weight.shape).type(weight.dtype) except Exception as e: print("ERROR", key, e) - else: #loha + elif patch_type == "loha": w1a = v[0] w1b = v[1] if v[2] is not None: @@ -305,6 +311,8 @@ def calculate_weight(self, patches, weight, key): weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype) except Exception as e: print("ERROR", key, e) + else: + print("patch type not recognized", patch_type, key) return weight From 614b7e731f7f9fdcf11eeb46e0623b0977a7e634 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 9 Dec 2023 18:15:26 -0500 Subject: [PATCH 297/420] Implement GLora. --- comfy/lora.py | 11 +++++++++++ comfy/model_patcher.py | 10 ++++++++++ 2 files changed, 21 insertions(+) diff --git a/comfy/lora.py b/comfy/lora.py index ecd518084a5..5e4009b47f9 100644 --- a/comfy/lora.py +++ b/comfy/lora.py @@ -118,6 +118,17 @@ def load_lora(lora, to_load): if (lokr_w1 is not None) or (lokr_w2 is not None) or (lokr_w1_a is not None) or (lokr_w2_a is not None): patch_dict[to_load[x]] = ("lokr", (lokr_w1, lokr_w2, alpha, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t2)) + #glora + a1_name = "{}.a1.weight".format(x) + a2_name = "{}.a2.weight".format(x) + b1_name = "{}.b1.weight".format(x) + b2_name = "{}.b2.weight".format(x) + if a1_name in lora: + patch_dict[to_load[x]] = ("glora", (lora[a1_name], lora[a2_name], lora[b1_name], lora[b2_name], alpha)) + loaded_keys.add(a1_name) + loaded_keys.add(a2_name) + loaded_keys.add(b1_name) + loaded_keys.add(b2_name) w_norm_name = "{}.w_norm".format(x) b_norm_name = "{}.b_norm".format(x) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index d78cdfd4dfd..55ca913ec78 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -311,6 +311,16 @@ def calculate_weight(self, patches, weight, key): weight += (alpha * m1 * m2).reshape(weight.shape).type(weight.dtype) except Exception as e: print("ERROR", key, e) + elif patch_type == "glora": + if v[4] is not None: + alpha *= v[4] / v[0].shape[0] + + a1 = comfy.model_management.cast_to_device(v[0].flatten(start_dim=1), weight.device, torch.float32) + a2 = comfy.model_management.cast_to_device(v[1].flatten(start_dim=1), weight.device, torch.float32) + b1 = comfy.model_management.cast_to_device(v[2].flatten(start_dim=1), weight.device, torch.float32) + b2 = comfy.model_management.cast_to_device(v[3].flatten(start_dim=1), weight.device, torch.float32) + + weight += ((torch.mm(b2, b1) + torch.mm(torch.mm(weight.flatten(start_dim=1), a2), a1)) * alpha).reshape(weight.shape).type(weight.dtype) else: print("patch type not recognized", patch_type, key) From 340177e6e85d076ab9e222e4f3c6a22f1fb4031f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 10 Dec 2023 01:30:35 -0500 Subject: [PATCH 298/420] Disable non blocking on mps. 
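
The guard in isolation, assuming device is a torch.device; the real code
uses its own is_device_mps() helper:

    import torch

    def use_non_blocking(device):
        # pytorch bug? mps doesn't support non blocking transfers, so
        # fall back to synchronous copies there.
        return device.type != "mps"

    def cast(tensor, device, dtype):
        nb = use_non_blocking(device)
        return tensor.to(device, non_blocking=nb).to(dtype, non_blocking=nb)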
--- comfy/model_management.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index ef9bec54563..0c51eee51e0 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -553,15 +553,19 @@ def cast_to_device(tensor, device, dtype, copy=False): elif is_intel_xpu(): device_supports_cast = True + non_blocking = True + if is_device_mps(device): + non_blocking = False #pytorch bug? mps doesn't support non blocking + if device_supports_cast: if copy: if tensor.device == device: - return tensor.to(dtype, copy=copy, non_blocking=True) - return tensor.to(device, copy=copy, non_blocking=True).to(dtype, non_blocking=True) + return tensor.to(dtype, copy=copy, non_blocking=non_blocking) + return tensor.to(device, copy=copy, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking) else: - return tensor.to(device, non_blocking=True).to(dtype, non_blocking=True) + return tensor.to(device, non_blocking=non_blocking).to(dtype, non_blocking=non_blocking) else: - return tensor.to(device, dtype, copy=copy, non_blocking=True) + return tensor.to(device, dtype, copy=copy, non_blocking=non_blocking) def xformers_enabled(): global directml_enabled From 69033081c50de94cbc2a4fce12900611da04b1e9 Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" Date: Mon, 11 Dec 2023 00:24:16 +0900 Subject: [PATCH 299/420] mask editor bugfix - Addressing the issue where an unnecessary hidden panel disrupts the drawing. --- web/extensions/core/maskeditor.js | 6 ------ 1 file changed, 6 deletions(-) diff --git a/web/extensions/core/maskeditor.js b/web/extensions/core/maskeditor.js index 1ea4dbcaa5c..bb2f16d42b5 100644 --- a/web/extensions/core/maskeditor.js +++ b/web/extensions/core/maskeditor.js @@ -167,10 +167,6 @@ class MaskEditorDialog extends ComfyDialog { // If it is specified as relative, using it only as a hidden placeholder for padding is recommended // to prevent anomalies where it exceeds a certain size and goes outside of the window. - var placeholder = document.createElement("div"); - placeholder.style.position = "relative"; - placeholder.style.height = "50px"; - var bottom_panel = document.createElement("div"); bottom_panel.style.position = "absolute"; bottom_panel.style.bottom = "0px"; @@ -192,7 +188,6 @@ class MaskEditorDialog extends ComfyDialog { this.brush = brush; this.element.appendChild(imgCanvas); this.element.appendChild(maskCanvas); - this.element.appendChild(placeholder); // must below z-index than bottom_panel to avoid covering button this.element.appendChild(bottom_panel); document.body.appendChild(brush); @@ -218,7 +213,6 @@ class MaskEditorDialog extends ComfyDialog { this.element.appendChild(imgCanvas); this.element.appendChild(maskCanvas); - this.element.appendChild(placeholder); // must below z-index than bottom_panel to avoid covering button this.element.appendChild(bottom_panel); bottom_panel.appendChild(clearButton); From 57926635e8d84ae9eea4a0416cc75e363f5ede45 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 10 Dec 2023 23:00:54 -0500 Subject: [PATCH 300/420] Switch text encoder to manual cast. Use fp16 text encoder weights for CPU inference to lower memory usage. 
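
The core of the manual cast trick, reduced to one layer: keep the stored
weights in a compact dtype and cast them to the input's dtype and device on
every forward, instead of autocasting the whole module. A self-contained
sketch of the pattern behind the new comfy.ops.manual_cast classes:

    import torch

    class ManualCastLinear(torch.nn.Linear):
        def forward(self, input):
            # Weights stay in fp16 wherever they were loaded; casting per
            # call means the layer accepts inputs of any dtype or device.
            weight = self.weight.to(device=input.device, dtype=input.dtype)
            bias = None
            if self.bias is not None:
                bias = self.bias.to(device=input.device, dtype=input.dtype)
            return torch.nn.functional.linear(input, weight, bias)

    layer = ManualCastLinear(8, 8).half()
    out = layer(torch.randn(1, 8))  # fp32 in, fp32 out, fp16 storage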
--- comfy/model_management.py | 3 +++ comfy/ops.py | 33 +++++++++++++++++++++++++ comfy/sd1_clip.py | 52 +++++++++++++++++---------------------- 3 files changed, 59 insertions(+), 29 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 0c51eee51e0..a6c8fb352b2 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -503,6 +503,9 @@ def text_encoder_dtype(device=None): elif args.fp32_text_enc: return torch.float32 + if is_device_cpu(device): + return torch.float16 + if should_use_fp16(device, prioritize_performance=False): return torch.float16 else: diff --git a/comfy/ops.py b/comfy/ops.py index deb849d63c9..e48568409a1 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -29,6 +29,39 @@ def conv_nd(dims, *args, **kwargs): else: raise ValueError(f"unsupported dimensions: {dims}") +def cast_bias_weight(s, input): + bias = None + if s.bias is not None: + bias = s.bias.to(device=input.device, dtype=input.dtype) + weight = s.weight.to(device=input.device, dtype=input.dtype) + return weight, bias + +class manual_cast: + class Linear(Linear): + def forward(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.linear(input, weight, bias) + + class Conv2d(Conv2d): + def forward(self, input): + weight, bias = cast_bias_weight(self, input) + return self._conv_forward(input, weight, bias) + + class Conv3d(Conv3d): + def forward(self, input): + weight, bias = cast_bias_weight(self, input) + return self._conv_forward(input, weight, bias) + + class GroupNorm(GroupNorm): + def forward(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) + + class LayerNorm(LayerNorm): + def forward(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) + @contextmanager def use_comfy_ops(device=None, dtype=None): # Kind of an ugly hack but I can't think of a better way old_torch_nn_linear = torch.nn.Linear diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 4530168ab7a..6ffef515ede 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -78,7 +78,7 @@ def __init__(self, version="openai/clip-vit-large-patch14", device="cpu", max_le with open(textmodel_json_config) as f: config = json.load(f) - self.transformer = model_class(config, dtype, device, comfy.ops) + self.transformer = model_class(config, dtype, device, comfy.ops.manual_cast) self.num_layers = self.transformer.num_layers self.max_length = max_length @@ -160,37 +160,31 @@ def forward(self, tokens): tokens = self.set_up_textual_embeddings(tokens, backup_embeds) tokens = torch.LongTensor(tokens).to(device) - if self.transformer.dtype != torch.float32: - precision_scope = torch.autocast + attention_mask = None + if self.enable_attention_masks: + attention_mask = torch.zeros_like(tokens) + max_token = self.transformer.get_input_embeddings().weight.shape[0] - 1 + for x in range(attention_mask.shape[0]): + for y in range(attention_mask.shape[1]): + attention_mask[x, y] = 1 + if tokens[x, y] == max_token: + break + + outputs = self.transformer(tokens, attention_mask, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state) + self.transformer.set_input_embeddings(backup_embeds) + + if self.layer == "last": + z = outputs[0] else: - precision_scope = lambda a, dtype: contextlib.nullcontext(a) - - with 
precision_scope(model_management.get_autocast_device(device), dtype=torch.float32): - attention_mask = None - if self.enable_attention_masks: - attention_mask = torch.zeros_like(tokens) - max_token = self.transformer.get_input_embeddings().weight.shape[0] - 1 - for x in range(attention_mask.shape[0]): - for y in range(attention_mask.shape[1]): - attention_mask[x, y] = 1 - if tokens[x, y] == max_token: - break - - outputs = self.transformer(tokens, attention_mask, intermediate_output=self.layer_idx, final_layer_norm_intermediate=self.layer_norm_hidden_state) - self.transformer.set_input_embeddings(backup_embeds) - - if self.layer == "last": - z = outputs[0] - else: - z = outputs[1] + z = outputs[1] - if outputs[2] is not None: - pooled_output = outputs[2].float() - else: - pooled_output = None + if outputs[2] is not None: + pooled_output = outputs[2].float() + else: + pooled_output = None - if self.text_projection is not None and pooled_output is not None: - pooled_output = pooled_output.float().to(self.text_projection.device) @ self.text_projection.float() + if self.text_projection is not None and pooled_output is not None: + pooled_output = pooled_output.float().to(self.text_projection.device) @ self.text_projection.float() return z.float(), pooled_output def encode(self, tokens): From ab93abd4b2eaf99d4a52f9a036600d9d46355d92 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 11 Dec 2023 17:33:35 +0000 Subject: [PATCH 301/420] Prevent cleaning graph state on undo/redo (#2255) * Prevent cleaning graph state on undo/redo * Remove pause rendering due to LG bug --- web/extensions/core/undoRedo.js | 25 +++++++++++-------------- web/scripts/app.js | 7 +++++-- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js index c6613b0f02d..3cb137520f4 100644 --- a/web/extensions/core/undoRedo.js +++ b/web/extensions/core/undoRedo.js @@ -71,24 +71,21 @@ function graphEqual(a, b, root = true) { } const undoRedo = async (e) => { + const updateState = async (source, target) => { + const prevState = source.pop(); + if (prevState) { + target.push(activeState); + isOurLoad = true; + await app.loadGraphData(prevState, false); + activeState = prevState; + } + } if (e.ctrlKey || e.metaKey) { if (e.key === "y") { - const prevState = redo.pop(); - if (prevState) { - undo.push(activeState); - isOurLoad = true; - await app.loadGraphData(prevState); - activeState = prevState; - } + updateState(redo, undo); return true; } else if (e.key === "z") { - const prevState = undo.pop(); - if (prevState) { - redo.push(activeState); - isOurLoad = true; - await app.loadGraphData(prevState); - activeState = prevState; - } + updateState(undo, redo); return true; } } diff --git a/web/scripts/app.js b/web/scripts/app.js index 5faf41fb36b..d2a6f4de425 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1559,9 +1559,12 @@ export class ComfyApp { /** * Populates the graph with the specified workflow data * @param {*} graphData A serialized graph object + * @param { boolean } clean If the graph state, e.g. 
images, should be cleared */ - async loadGraphData(graphData) { - this.clean(); + async loadGraphData(graphData, clean = true) { + if (clean !== false) { + this.clean(); + } let reset_invalid_values = false; if (!graphData) { From ba07cb748e4793a6393288d621aa8e2f0f282595 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Dec 2023 18:24:44 -0500 Subject: [PATCH 302/420] Use faster manual cast for fp8 in unet. --- comfy/model_base.py | 19 ++++++++++--------- comfy/model_management.py | 16 +++++++++++++++- comfy/ops.py | 9 +++++++++ comfy/sd.py | 12 ++++++++++-- comfy/supported_models_base.py | 4 ++++ 5 files changed, 48 insertions(+), 12 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 5bfcc391ded..bab7b9b340d 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -4,6 +4,7 @@ from comfy.ldm.modules.diffusionmodules.openaimodel import Timestep import comfy.model_management import comfy.conds +import comfy.ops from enum import Enum import contextlib from . import utils @@ -41,9 +42,14 @@ def __init__(self, model_config, model_type=ModelType.EPS, device=None): unet_config = model_config.unet_config self.latent_format = model_config.latent_format self.model_config = model_config + self.manual_cast_dtype = model_config.manual_cast_dtype if not unet_config.get("disable_unet_model_creation", False): - self.diffusion_model = UNetModel(**unet_config, device=device) + if self.manual_cast_dtype is not None: + operations = comfy.ops.manual_cast + else: + operations = comfy.ops + self.diffusion_model = UNetModel(**unet_config, device=device, operations=operations) self.model_type = model_type self.model_sampling = model_sampling(model_config, model_type) @@ -63,11 +69,8 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, trans context = c_crossattn dtype = self.get_dtype() - if comfy.model_management.supports_dtype(xc.device, dtype): - precision_scope = lambda a: contextlib.nullcontext(a) - else: - precision_scope = torch.autocast - dtype = torch.float32 + if self.manual_cast_dtype is not None: + dtype = self.manual_cast_dtype xc = xc.to(dtype) t = self.model_sampling.timestep(t).float() @@ -79,9 +82,7 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, trans extra = extra.to(dtype) extra_conds[o] = extra - with precision_scope(comfy.model_management.get_autocast_device(xc.device)): - model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() - + model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() return self.model_sampling.calculate_denoised(sigma, model_output, x) def get_dtype(self): diff --git a/comfy/model_management.py b/comfy/model_management.py index a6c8fb352b2..fe0374a8b2f 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -474,6 +474,20 @@ def unet_dtype(device=None, model_params=0): return torch.float16 return torch.float32 +# None means no manual cast +def unet_manual_cast(weight_dtype, inference_device): + if weight_dtype == torch.float32: + return None + + fp16_supported = comfy.model_management.should_use_fp16(inference_device, prioritize_performance=False) + if fp16_supported and weight_dtype == torch.float16: + return None + + if fp16_supported: + return torch.float16 + else: + return torch.float32 + def text_encoder_offload_device(): if args.gpu_only: return get_torch_device() @@ -538,7 +552,7 @@ def 
get_autocast_device(dev): def supports_dtype(device, dtype): #TODO if dtype == torch.float32: return True - if torch.device("cpu") == device: + if is_device_cpu(device): return False if dtype == torch.float16: return True diff --git a/comfy/ops.py b/comfy/ops.py index e48568409a1..a67bc809fd2 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -62,6 +62,15 @@ def forward(self, input): weight, bias = cast_bias_weight(self, input) return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) + @classmethod + def conv_nd(s, dims, *args, **kwargs): + if dims == 2: + return s.Conv2d(*args, **kwargs) + elif dims == 3: + return s.Conv3d(*args, **kwargs) + else: + raise ValueError(f"unsupported dimensions: {dims}") + @contextmanager def use_comfy_ops(device=None, dtype=None): # Kind of an ugly hack but I can't think of a better way old_torch_nn_linear = torch.nn.Linear diff --git a/comfy/sd.py b/comfy/sd.py index 43e201d363b..8c056e4ea2f 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -433,11 +433,15 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o parameters = comfy.utils.calculate_parameters(sd, "model.diffusion_model.") unet_dtype = model_management.unet_dtype(model_params=parameters) + load_device = model_management.get_torch_device() + manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device) class WeightsLoader(torch.nn.Module): pass model_config = model_detection.model_config_from_unet(sd, "model.diffusion_model.", unet_dtype) + model_config.set_manual_cast(manual_cast_dtype) + if model_config is None: raise RuntimeError("ERROR: Could not detect model type of: {}".format(ckpt_path)) @@ -470,7 +474,7 @@ class WeightsLoader(torch.nn.Module): print("left over keys:", left_over) if output_model: - model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=model_management.unet_offload_device(), current_device=inital_load_device) + model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device) if inital_load_device != torch.device("cpu"): print("loaded straight to GPU") model_management.load_model_gpu(model_patcher) @@ -481,6 +485,9 @@ class WeightsLoader(torch.nn.Module): def load_unet_state_dict(sd): #load unet in diffusers format parameters = comfy.utils.calculate_parameters(sd) unet_dtype = model_management.unet_dtype(model_params=parameters) + load_device = model_management.get_torch_device() + manual_cast_dtype = model_management.unet_manual_cast(unet_dtype, load_device) + if "input_blocks.0.0.weight" in sd: #ldm model_config = model_detection.model_config_from_unet(sd, "", unet_dtype) if model_config is None: @@ -501,13 +508,14 @@ def load_unet_state_dict(sd): #load unet in diffusers format else: print(diffusers_keys[k], k) offload_device = model_management.unet_offload_device() + model_config.set_manual_cast(manual_cast_dtype) model = model_config.get_model(new_sd, "") model = model.to(offload_device) model.load_model_weights(new_sd, "") left_over = sd.keys() if len(left_over) > 0: print("left over keys in unet:", left_over) - return comfy.model_patcher.ModelPatcher(model, load_device=model_management.get_torch_device(), offload_device=offload_device) + return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device) def load_unet(unet_path): sd = comfy.utils.load_torch_file(unet_path) diff --git 
a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 3412cfea030..49087d23e5d 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -22,6 +22,8 @@ class BASE: sampling_settings = {} latent_format = latent_formats.LatentFormat + manual_cast_dtype = None + @classmethod def matches(s, unet_config): for k in s.unet_config: @@ -71,3 +73,5 @@ def process_vae_state_dict_for_saving(self, state_dict): replace_prefix = {"": "first_stage_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) + def set_manual_cast(self, manual_cast_dtype): + self.manual_cast_dtype = manual_cast_dtype From b0aab1e4ea3dfefe09c4f07de0e5237558097e22 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Dec 2023 18:36:29 -0500 Subject: [PATCH 303/420] Add an option --fp16-unet to force using fp16 for the unet. --- comfy/cli_args.py | 1 + comfy/model_management.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 58d0348028f..d9c8668f470 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -57,6 +57,7 @@ def __call__(self, parser, namespace, values, option_string=None): fpunet_group = parser.add_mutually_exclusive_group() fpunet_group.add_argument("--bf16-unet", action="store_true", help="Run the UNET in bf16. This should only be used for testing stuff.") +fpunet_group.add_argument("--fp16-unet", action="store_true", help="Store unet weights in fp16.") fpunet_group.add_argument("--fp8_e4m3fn-unet", action="store_true", help="Store unet weights in fp8_e4m3fn.") fpunet_group.add_argument("--fp8_e5m2-unet", action="store_true", help="Store unet weights in fp8_e5m2.") diff --git a/comfy/model_management.py b/comfy/model_management.py index fe0374a8b2f..b6a9471bfa1 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -466,6 +466,8 @@ def unet_inital_load_device(parameters, dtype): def unet_dtype(device=None, model_params=0): if args.bf16_unet: return torch.bfloat16 + if args.fp16_unet: + return torch.float16 if args.fp8_e4m3fn_unet: return torch.float8_e4m3fn if args.fp8_e5m2_unet: From 77755ab8dbc74f3f231aa817590401d7969f96a4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Dec 2023 23:27:13 -0500 Subject: [PATCH 304/420] Refactor comfy.ops comfy.ops -> comfy.ops.disable_weight_init This should make it more clear what they actually do. Some unused code has also been removed. 
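
Roughly how call sites choose between the two namespaces after this
change, shown as an illustrative sketch rather than a verbatim excerpt
(the real selection is in the model_base.py hunk below):

    import comfy.ops

    def pick_operations(manual_cast_dtype):
        # Manual casting is only needed when the stored weight dtype
        # differs from the compute dtype; otherwise the plain layers
        # that merely skip reset_parameters() are enough.
        if manual_cast_dtype is not None:
            return comfy.ops.manual_cast
        return comfy.ops.disable_weight_init
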
--- comfy/cldm/cldm.py | 2 +- comfy/clip_vision.py | 2 +- comfy/controlnet.py | 27 ++---- comfy/ldm/modules/attention.py | 13 +-- comfy/ldm/modules/diffusionmodules/model.py | 39 ++++----- .../modules/diffusionmodules/openaimodel.py | 14 +-- comfy/ldm/modules/diffusionmodules/util.py | 41 --------- comfy/ldm/modules/temporal_ae.py | 5 +- comfy/model_base.py | 2 +- comfy/ops.py | 85 +++++++------------ 10 files changed, 77 insertions(+), 153 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index bbe5891e691..00373a7903f 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -53,7 +53,7 @@ def __init__( transformer_depth_middle=None, transformer_depth_output=None, device=None, - operations=comfy.ops, + operations=comfy.ops.disable_weight_init, **kwargs, ): super().__init__() diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index ae87c75b4d4..ba8a3a8d569 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -38,7 +38,7 @@ def __init__(self, json_config): if comfy.model_management.should_use_fp16(self.load_device, prioritize_performance=False): self.dtype = torch.float16 - self.model = comfy.clip_model.CLIPVisionModelProjection(config, self.dtype, offload_device, comfy.ops) + self.model = comfy.clip_model.CLIPVisionModelProjection(config, self.dtype, offload_device, comfy.ops.disable_weight_init) self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) def load_sd(self, sd): diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 6d37aa74f69..3212ac8c4b9 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -208,9 +208,9 @@ def __init__(self, in_features: int, out_features: int, bias: bool = True, def forward(self, input): if self.up is not None: - return torch.nn.functional.linear(input, self.weight.to(input.dtype).to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias) + return torch.nn.functional.linear(input, self.weight.to(dtype=input.dtype, device=input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias) else: - return torch.nn.functional.linear(input, self.weight.to(input.device), self.bias) + return torch.nn.functional.linear(input, self.weight.to(dtype=input.dtype, device=input.device), self.bias) class Conv2d(torch.nn.Module): def __init__( @@ -247,24 +247,9 @@ def __init__( def forward(self, input): if self.up is not None: - return torch.nn.functional.conv2d(input, self.weight.to(input.dtype).to(input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) + return torch.nn.functional.conv2d(input, self.weight.to(dtype=input.dtype, device=input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) else: - return torch.nn.functional.conv2d(input, self.weight.to(input.device), self.bias, self.stride, self.padding, self.dilation, self.groups) - - def conv_nd(self, dims, *args, **kwargs): - if dims == 2: - return self.Conv2d(*args, **kwargs) - else: - raise ValueError(f"unsupported dimensions: {dims}") - - class Conv3d(comfy.ops.Conv3d): - pass - - class GroupNorm(comfy.ops.GroupNorm): - pass - - class 
LayerNorm(comfy.ops.LayerNorm): - pass + return torch.nn.functional.conv2d(input, self.weight.to(dtype=input.dtype, device=input.device), self.bias, self.stride, self.padding, self.dilation, self.groups) class ControlLora(ControlNet): @@ -278,7 +263,9 @@ def pre_run(self, model, percent_to_timestep_function): controlnet_config = model.model_config.unet_config.copy() controlnet_config.pop("out_channels") controlnet_config["hint_channels"] = self.control_weights["input_hint_block.0.weight"].shape[1] - controlnet_config["operations"] = ControlLoraOps() + class control_lora_ops(ControlLoraOps, comfy.ops.disable_weight_init): + pass + controlnet_config["operations"] = control_lora_ops self.control_model = comfy.cldm.cldm.ControlNet(**controlnet_config) dtype = model.get_dtype() self.control_model.to(dtype) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 8299b1d94bb..8d86aa53d2e 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -19,6 +19,7 @@ from comfy.cli_args import args import comfy.ops +ops = comfy.ops.disable_weight_init # CrossAttn precision handling if args.dont_upcast_attention: @@ -55,7 +56,7 @@ def init_(tensor): # feedforward class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=comfy.ops): + def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=ops): super().__init__() self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device) @@ -65,7 +66,7 @@ def forward(self, x): class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=comfy.ops): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=ops): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) @@ -356,7 +357,7 @@ def optimized_attention_for_device(device, mask=False): class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=ops): super().__init__() inner_dim = dim_head * heads context_dim = default(context_dim, query_dim) @@ -389,7 +390,7 @@ def forward(self, x, context=None, value=None, mask=None): class BasicTransformerBlock(nn.Module): def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None, - disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=comfy.ops): + disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=ops): super().__init__() self.ff_in = ff_in or inner_dim is not None @@ -558,7 +559,7 @@ class SpatialTransformer(nn.Module): def __init__(self, in_channels, n_heads, d_head, depth=1, dropout=0., context_dim=None, disable_self_attn=False, use_linear=False, - use_checkpoint=True, dtype=None, device=None, operations=comfy.ops): + use_checkpoint=True, dtype=None, device=None, operations=ops): super().__init__() if exists(context_dim) and not isinstance(context_dim, list): context_dim = [context_dim] * depth @@ -632,7 +633,7 @@ def __init__( disable_self_attn=False, disable_temporal_crossattention=False, max_time_embed_period: int = 10000, - dtype=None, device=None, 
operations=comfy.ops + dtype=None, device=None, operations=ops ): super().__init__( in_channels, diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index f23417fd216..fce29cb85ec 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -8,6 +8,7 @@ from comfy import model_management import comfy.ops +ops = comfy.ops.disable_weight_init if model_management.xformers_enabled_vae(): import xformers @@ -48,7 +49,7 @@ def __init__(self, in_channels, with_conv): super().__init__() self.with_conv = with_conv if self.with_conv: - self.conv = comfy.ops.Conv2d(in_channels, + self.conv = ops.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, @@ -78,7 +79,7 @@ def __init__(self, in_channels, with_conv): self.with_conv = with_conv if self.with_conv: # no asymmetric padding in torch conv, must do it ourselves - self.conv = comfy.ops.Conv2d(in_channels, + self.conv = ops.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, @@ -105,30 +106,30 @@ def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, self.swish = torch.nn.SiLU(inplace=True) self.norm1 = Normalize(in_channels) - self.conv1 = comfy.ops.Conv2d(in_channels, + self.conv1 = ops.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) if temb_channels > 0: - self.temb_proj = comfy.ops.Linear(temb_channels, + self.temb_proj = ops.Linear(temb_channels, out_channels) self.norm2 = Normalize(out_channels) self.dropout = torch.nn.Dropout(dropout, inplace=True) - self.conv2 = comfy.ops.Conv2d(out_channels, + self.conv2 = ops.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1) if self.in_channels != self.out_channels: if self.use_conv_shortcut: - self.conv_shortcut = comfy.ops.Conv2d(in_channels, + self.conv_shortcut = ops.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1) else: - self.nin_shortcut = comfy.ops.Conv2d(in_channels, + self.nin_shortcut = ops.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, @@ -245,22 +246,22 @@ def __init__(self, in_channels): self.in_channels = in_channels self.norm = Normalize(in_channels) - self.q = comfy.ops.Conv2d(in_channels, + self.q = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) - self.k = comfy.ops.Conv2d(in_channels, + self.k = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) - self.v = comfy.ops.Conv2d(in_channels, + self.v = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0) - self.proj_out = comfy.ops.Conv2d(in_channels, + self.proj_out = ops.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, @@ -312,14 +313,14 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, # timestep embedding self.temb = nn.Module() self.temb.dense = nn.ModuleList([ - comfy.ops.Linear(self.ch, + ops.Linear(self.ch, self.temb_ch), - comfy.ops.Linear(self.temb_ch, + ops.Linear(self.temb_ch, self.temb_ch), ]) # downsampling - self.conv_in = comfy.ops.Conv2d(in_channels, + self.conv_in = ops.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, @@ -388,7 +389,7 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, # end self.norm_out = Normalize(block_in) - self.conv_out = comfy.ops.Conv2d(block_in, + self.conv_out = ops.Conv2d(block_in, out_ch, kernel_size=3, stride=1, @@ -461,7 +462,7 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, self.in_channels = in_channels # downsampling - self.conv_in = 
comfy.ops.Conv2d(in_channels, + self.conv_in = ops.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, @@ -506,7 +507,7 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, # end self.norm_out = Normalize(block_in) - self.conv_out = comfy.ops.Conv2d(block_in, + self.conv_out = ops.Conv2d(block_in, 2*z_channels if double_z else z_channels, kernel_size=3, stride=1, @@ -541,7 +542,7 @@ class Decoder(nn.Module): def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - conv_out_op=comfy.ops.Conv2d, + conv_out_op=ops.Conv2d, resnet_op=ResnetBlock, attn_op=AttnBlock, **ignorekwargs): @@ -565,7 +566,7 @@ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, self.z_shape, np.prod(self.z_shape))) # z to block_in - self.conv_in = comfy.ops.Conv2d(z_channels, + self.conv_in = ops.Conv2d(z_channels, block_in, kernel_size=3, stride=1, diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 12efd833c51..057dd16b250 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -12,13 +12,13 @@ checkpoint, avg_pool_nd, zero_module, - normalization, timestep_embedding, AlphaBlender, ) from ..attention import SpatialTransformer, SpatialVideoTransformer, default from comfy.ldm.util import exists import comfy.ops +ops = comfy.ops.disable_weight_init class TimestepBlock(nn.Module): """ @@ -70,7 +70,7 @@ class Upsample(nn.Module): upsampling occurs in the inner-two dimensions. """ - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=comfy.ops): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=ops): super().__init__() self.channels = channels self.out_channels = out_channels or channels @@ -106,7 +106,7 @@ class Downsample(nn.Module): downsampling occurs in the inner-two dimensions. 
""" - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=comfy.ops): + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, dtype=None, device=None, operations=ops): super().__init__() self.channels = channels self.out_channels = out_channels or channels @@ -159,7 +159,7 @@ def __init__( skip_t_emb=False, dtype=None, device=None, - operations=comfy.ops + operations=ops ): super().__init__() self.channels = channels @@ -284,7 +284,7 @@ def __init__( down: bool = False, dtype=None, device=None, - operations=comfy.ops + operations=ops ): super().__init__( channels, @@ -434,7 +434,7 @@ def __init__( disable_temporal_crossattention=False, max_ddpm_temb_period=10000, device=None, - operations=comfy.ops, + operations=ops, ): super().__init__() assert use_spatial_transformer == True, "use_spatial_transformer has to be true" @@ -581,7 +581,7 @@ def get_resblock( up=False, dtype=None, device=None, - operations=comfy.ops + operations=ops ): if self.use_temporal_resblocks: return VideoResBlock( diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index 704bbe57450..68175b62a58 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -16,7 +16,6 @@ from einops import repeat, rearrange from comfy.ldm.util import instantiate_from_config -import comfy.ops class AlphaBlender(nn.Module): strategies = ["learned", "fixed", "learned_with_images"] @@ -273,46 +272,6 @@ def mean_flat(tensor): return tensor.mean(dim=list(range(1, len(tensor.shape)))) -def normalization(channels, dtype=None): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. - """ - return GroupNorm32(32, channels, dtype=dtype) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return comfy.ops.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return comfy.ops.Linear(*args, **kwargs) - - def avg_pool_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D average pooling module. 
diff --git a/comfy/ldm/modules/temporal_ae.py b/comfy/ldm/modules/temporal_ae.py index 11ae049f3be..7ea68dc9e28 100644 --- a/comfy/ldm/modules/temporal_ae.py +++ b/comfy/ldm/modules/temporal_ae.py @@ -5,6 +5,7 @@ from einops import rearrange, repeat import comfy.ops +ops = comfy.ops.disable_weight_init from .diffusionmodules.model import ( AttnBlock, @@ -130,9 +131,9 @@ def __init__( time_embed_dim = self.in_channels * 4 self.video_time_embed = torch.nn.Sequential( - comfy.ops.Linear(self.in_channels, time_embed_dim), + ops.Linear(self.in_channels, time_embed_dim), torch.nn.SiLU(), - comfy.ops.Linear(time_embed_dim, self.in_channels), + ops.Linear(time_embed_dim, self.in_channels), ) self.merge_strategy = merge_strategy diff --git a/comfy/model_base.py b/comfy/model_base.py index bab7b9b340d..412c837925c 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -48,7 +48,7 @@ def __init__(self, model_config, model_type=ModelType.EPS, device=None): if self.manual_cast_dtype is not None: operations = comfy.ops.manual_cast else: - operations = comfy.ops + operations = comfy.ops.disable_weight_init self.diffusion_model = UNetModel(**unet_config, device=device, operations=operations) self.model_type = model_type self.model_sampling = model_sampling(model_config, model_type) diff --git a/comfy/ops.py b/comfy/ops.py index a67bc809fd2..08c63384789 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -1,33 +1,35 @@ import torch from contextlib import contextmanager -class Linear(torch.nn.Linear): - def reset_parameters(self): - return None +class disable_weight_init: + class Linear(torch.nn.Linear): + def reset_parameters(self): + return None -class Conv2d(torch.nn.Conv2d): - def reset_parameters(self): - return None + class Conv2d(torch.nn.Conv2d): + def reset_parameters(self): + return None -class Conv3d(torch.nn.Conv3d): - def reset_parameters(self): - return None + class Conv3d(torch.nn.Conv3d): + def reset_parameters(self): + return None -class GroupNorm(torch.nn.GroupNorm): - def reset_parameters(self): - return None + class GroupNorm(torch.nn.GroupNorm): + def reset_parameters(self): + return None -class LayerNorm(torch.nn.LayerNorm): - def reset_parameters(self): - return None + class LayerNorm(torch.nn.LayerNorm): + def reset_parameters(self): + return None -def conv_nd(dims, *args, **kwargs): - if dims == 2: - return Conv2d(*args, **kwargs) - elif dims == 3: - return Conv3d(*args, **kwargs) - else: - raise ValueError(f"unsupported dimensions: {dims}") + @classmethod + def conv_nd(s, dims, *args, **kwargs): + if dims == 2: + return s.Conv2d(*args, **kwargs) + elif dims == 3: + return s.Conv3d(*args, **kwargs) + else: + raise ValueError(f"unsupported dimensions: {dims}") def cast_bias_weight(s, input): bias = None @@ -36,55 +38,28 @@ def cast_bias_weight(s, input): weight = s.weight.to(device=input.device, dtype=input.dtype) return weight, bias -class manual_cast: - class Linear(Linear): +class manual_cast(disable_weight_init): + class Linear(disable_weight_init.Linear): def forward(self, input): weight, bias = cast_bias_weight(self, input) return torch.nn.functional.linear(input, weight, bias) - class Conv2d(Conv2d): + class Conv2d(disable_weight_init.Conv2d): def forward(self, input): weight, bias = cast_bias_weight(self, input) return self._conv_forward(input, weight, bias) - class Conv3d(Conv3d): + class Conv3d(disable_weight_init.Conv3d): def forward(self, input): weight, bias = cast_bias_weight(self, input) return self._conv_forward(input, weight, bias) - class GroupNorm(GroupNorm): + 
class GroupNorm(disable_weight_init.GroupNorm): def forward(self, input): weight, bias = cast_bias_weight(self, input) return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) - class LayerNorm(LayerNorm): + class LayerNorm(disable_weight_init.LayerNorm): def forward(self, input): weight, bias = cast_bias_weight(self, input) return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) - - @classmethod - def conv_nd(s, dims, *args, **kwargs): - if dims == 2: - return s.Conv2d(*args, **kwargs) - elif dims == 3: - return s.Conv3d(*args, **kwargs) - else: - raise ValueError(f"unsupported dimensions: {dims}") - -@contextmanager -def use_comfy_ops(device=None, dtype=None): # Kind of an ugly hack but I can't think of a better way - old_torch_nn_linear = torch.nn.Linear - force_device = device - force_dtype = dtype - def linear_with_dtype(in_features: int, out_features: int, bias: bool = True, device=None, dtype=None): - if force_device is not None: - device = force_device - if force_dtype is not None: - dtype = force_dtype - return Linear(in_features, out_features, bias=bias, device=device, dtype=dtype) - - torch.nn.Linear = linear_with_dtype - try: - yield - finally: - torch.nn.Linear = old_torch_nn_linear From 3152023fbc4f8ee6598a863314ca98d48ea9c2e6 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Dec 2023 23:50:38 -0500 Subject: [PATCH 305/420] Use inference dtype for unet memory usage estimation. --- comfy/model_base.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index 412c837925c..a7582b330d9 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -177,9 +177,12 @@ def set_inpaint(self): def memory_required(self, input_shape): if comfy.model_management.xformers_enabled() or comfy.model_management.pytorch_attention_flash_attention(): + dtype = self.get_dtype() + if self.manual_cast_dtype is not None: + dtype = self.manual_cast_dtype #TODO: this needs to be tweaked area = input_shape[0] * input_shape[2] * input_shape[3] - return (area * comfy.model_management.dtype_size(self.get_dtype()) / 50) * (1024 * 1024) + return (area * comfy.model_management.dtype_size(dtype) / 50) * (1024 * 1024) else: #TODO: this formula might be too aggressive since I tweaked the sub-quad and split algorithms to use less memory. area = input_shape[0] * input_shape[2] * input_shape[3] From 32b7e7e769c206a06bf6e10ad2ddb6af9a378f56 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 12 Dec 2023 03:32:23 -0500 Subject: [PATCH 306/420] Add manual cast to controlnet. 
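
The controlnet now reuses the unet's manual-cast decision: if the
stored weight dtype can run natively on the load device the model runs
as-is, otherwise a compute dtype is picked and the manual_cast ops are
swapped in. A simplified sketch of that decision, mirroring
comfy.model_management.unet_manual_cast (fp16_supported stands in for
the real device query):

    import torch

    def manual_cast_dtype(weight_dtype, fp16_supported):
        # None means no manual cast: run with the weights as stored.
        if weight_dtype == torch.float32:
            return None
        if fp16_supported and weight_dtype == torch.float16:
            return None
        # e.g. fp8-stored weights: compute in fp16 where possible.
        return torch.float16 if fp16_supported else torch.float32
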
--- comfy/cldm/cldm.py | 28 +++++++++++------------ comfy/controlnet.py | 54 +++++++++++++++++++++++++++------------------ 2 files changed, 46 insertions(+), 36 deletions(-) diff --git a/comfy/cldm/cldm.py b/comfy/cldm/cldm.py index 00373a7903f..5eee5a51c95 100644 --- a/comfy/cldm/cldm.py +++ b/comfy/cldm/cldm.py @@ -141,24 +141,24 @@ def __init__( ) ] ) - self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels, operations=operations)]) + self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels, operations=operations, dtype=self.dtype, device=device)]) self.input_hint_block = TimestepEmbedSequential( - operations.conv_nd(dims, hint_channels, 16, 3, padding=1), + operations.conv_nd(dims, hint_channels, 16, 3, padding=1, dtype=self.dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, 16, 16, 3, padding=1), + operations.conv_nd(dims, 16, 16, 3, padding=1, dtype=self.dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, 16, 32, 3, padding=1, stride=2), + operations.conv_nd(dims, 16, 32, 3, padding=1, stride=2, dtype=self.dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, 32, 32, 3, padding=1), + operations.conv_nd(dims, 32, 32, 3, padding=1, dtype=self.dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, 32, 96, 3, padding=1, stride=2), + operations.conv_nd(dims, 32, 96, 3, padding=1, stride=2, dtype=self.dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, 96, 96, 3, padding=1), + operations.conv_nd(dims, 96, 96, 3, padding=1, dtype=self.dtype, device=device), nn.SiLU(), - operations.conv_nd(dims, 96, 256, 3, padding=1, stride=2), + operations.conv_nd(dims, 96, 256, 3, padding=1, stride=2, dtype=self.dtype, device=device), nn.SiLU(), - zero_module(operations.conv_nd(dims, 256, model_channels, 3, padding=1)) + operations.conv_nd(dims, 256, model_channels, 3, padding=1, dtype=self.dtype, device=device) ) self._feature_size = model_channels @@ -206,7 +206,7 @@ def __init__( ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) - self.zero_convs.append(self.make_zero_conv(ch, operations=operations)) + self.zero_convs.append(self.make_zero_conv(ch, operations=operations, dtype=self.dtype, device=device)) self._feature_size += ch input_block_chans.append(ch) if level != len(channel_mult) - 1: @@ -234,7 +234,7 @@ def __init__( ) ch = out_ch input_block_chans.append(ch) - self.zero_convs.append(self.make_zero_conv(ch, operations=operations)) + self.zero_convs.append(self.make_zero_conv(ch, operations=operations, dtype=self.dtype, device=device)) ds *= 2 self._feature_size += ch @@ -276,11 +276,11 @@ def __init__( operations=operations )] self.middle_block = TimestepEmbedSequential(*mid_block) - self.middle_block_out = self.make_zero_conv(ch, operations=operations) + self.middle_block_out = self.make_zero_conv(ch, operations=operations, dtype=self.dtype, device=device) self._feature_size += ch - def make_zero_conv(self, channels, operations=None): - return TimestepEmbedSequential(zero_module(operations.conv_nd(self.dims, channels, channels, 1, padding=0))) + def make_zero_conv(self, channels, operations=None, dtype=None, device=None): + return TimestepEmbedSequential(operations.conv_nd(self.dims, channels, channels, 1, padding=0, dtype=dtype, device=device)) def forward(self, x, hint, timesteps, context, y=None, **kwargs): t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 3212ac8c4b9..110b5c7c290 100644 --- a/comfy/controlnet.py 
+++ b/comfy/controlnet.py @@ -36,13 +36,13 @@ def __init__(self, device=None): self.cond_hint = None self.strength = 1.0 self.timestep_percent_range = (0.0, 1.0) + self.global_average_pooling = False self.timestep_range = None if device is None: device = comfy.model_management.get_torch_device() self.device = device self.previous_controlnet = None - self.global_average_pooling = False def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(0.0, 1.0)): self.cond_hint_original = cond_hint @@ -77,6 +77,7 @@ def copy_to(self, c): c.cond_hint_original = self.cond_hint_original c.strength = self.strength c.timestep_percent_range = self.timestep_percent_range + c.global_average_pooling = self.global_average_pooling def inference_memory_requirements(self, dtype): if self.previous_controlnet is not None: @@ -129,12 +130,14 @@ def control_merge(self, control_input, control_output, control_prev, output_dtyp return out class ControlNet(ControlBase): - def __init__(self, control_model, global_average_pooling=False, device=None): + def __init__(self, control_model, global_average_pooling=False, device=None, load_device=None, manual_cast_dtype=None): super().__init__(device) self.control_model = control_model - self.control_model_wrapped = comfy.model_patcher.ModelPatcher(self.control_model, load_device=comfy.model_management.get_torch_device(), offload_device=comfy.model_management.unet_offload_device()) + self.load_device = load_device + self.control_model_wrapped = comfy.model_patcher.ModelPatcher(self.control_model, load_device=load_device, offload_device=comfy.model_management.unet_offload_device()) self.global_average_pooling = global_average_pooling self.model_sampling_current = None + self.manual_cast_dtype = manual_cast_dtype def get_control(self, x_noisy, t, cond, batched_number): control_prev = None @@ -149,11 +152,8 @@ def get_control(self, x_noisy, t, cond, batched_number): return None dtype = self.control_model.dtype - if comfy.model_management.supports_dtype(self.device, dtype): - precision_scope = lambda a: contextlib.nullcontext(a) - else: - precision_scope = torch.autocast - dtype = torch.float32 + if self.manual_cast_dtype is not None: + dtype = self.manual_cast_dtype output_dtype = x_noisy.dtype if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]: @@ -171,12 +171,11 @@ def get_control(self, x_noisy, t, cond, batched_number): timestep = self.model_sampling_current.timestep(t) x_noisy = self.model_sampling_current.calculate_input(t, x_noisy) - with precision_scope(comfy.model_management.get_autocast_device(self.device)): - control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(dtype), y=y) + control = self.control_model(x=x_noisy.to(dtype), hint=self.cond_hint, timesteps=timestep.float(), context=context.to(dtype), y=y) return self.control_merge(None, control, control_prev, output_dtype) def copy(self): - c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling) + c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling, load_device=self.load_device, manual_cast_dtype=self.manual_cast_dtype) self.copy_to(c) return c @@ -207,10 +206,11 @@ def __init__(self, in_features: int, out_features: int, bias: bool = True, self.bias = None def forward(self, input): + weight, bias = comfy.ops.cast_bias_weight(self, input) if self.up is not None: - return torch.nn.functional.linear(input, 
self.weight.to(dtype=input.dtype, device=input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias) + return torch.nn.functional.linear(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias) else: - return torch.nn.functional.linear(input, self.weight.to(dtype=input.dtype, device=input.device), self.bias) + return torch.nn.functional.linear(input, weight, bias) class Conv2d(torch.nn.Module): def __init__( @@ -246,10 +246,11 @@ def __init__( def forward(self, input): + weight, bias = comfy.ops.cast_bias_weight(self, input) if self.up is not None: - return torch.nn.functional.conv2d(input, self.weight.to(dtype=input.dtype, device=input.device) + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), self.bias, self.stride, self.padding, self.dilation, self.groups) + return torch.nn.functional.conv2d(input, weight + (torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1))).reshape(self.weight.shape).type(input.dtype), bias, self.stride, self.padding, self.dilation, self.groups) else: - return torch.nn.functional.conv2d(input, self.weight.to(dtype=input.dtype, device=input.device), self.bias, self.stride, self.padding, self.dilation, self.groups) + return torch.nn.functional.conv2d(input, weight, bias, self.stride, self.padding, self.dilation, self.groups) class ControlLora(ControlNet): @@ -263,12 +264,19 @@ def pre_run(self, model, percent_to_timestep_function): controlnet_config = model.model_config.unet_config.copy() controlnet_config.pop("out_channels") controlnet_config["hint_channels"] = self.control_weights["input_hint_block.0.weight"].shape[1] - class control_lora_ops(ControlLoraOps, comfy.ops.disable_weight_init): - pass + self.manual_cast_dtype = model.manual_cast_dtype + dtype = model.get_dtype() + if self.manual_cast_dtype is None: + class control_lora_ops(ControlLoraOps, comfy.ops.disable_weight_init): + pass + else: + class control_lora_ops(ControlLoraOps, comfy.ops.manual_cast): + pass + dtype = self.manual_cast_dtype + controlnet_config["operations"] = control_lora_ops + controlnet_config["dtype"] = dtype self.control_model = comfy.cldm.cldm.ControlNet(**controlnet_config) - dtype = model.get_dtype() - self.control_model.to(dtype) self.control_model.to(comfy.model_management.get_torch_device()) diffusion_model = model.diffusion_model sd = diffusion_model.state_dict() @@ -372,6 +380,10 @@ def load_controlnet(ckpt_path, model=None): if controlnet_config is None: unet_dtype = comfy.model_management.unet_dtype() controlnet_config = comfy.model_detection.model_config_from_unet(controlnet_data, prefix, unet_dtype, True).unet_config + load_device = comfy.model_management.get_torch_device() + manual_cast_dtype = comfy.model_management.unet_manual_cast(unet_dtype, load_device) + if manual_cast_dtype is not None: + controlnet_config["operations"] = comfy.ops.manual_cast controlnet_config.pop("out_channels") controlnet_config["hint_channels"] = controlnet_data["{}input_hint_block.0.weight".format(prefix)].shape[1] control_model = comfy.cldm.cldm.ControlNet(**controlnet_config) @@ -400,14 +412,12 @@ class WeightsLoader(torch.nn.Module): missing, unexpected = control_model.load_state_dict(controlnet_data, strict=False) print(missing, unexpected) - control_model = control_model.to(unet_dtype) - global_average_pooling = False filename = 
os.path.splitext(ckpt_path)[0] if filename.endswith("_shuffle") or filename.endswith("_shuffle_fp16"): #TODO: smarter way of enabling global_average_pooling global_average_pooling = True - control = ControlNet(control_model, global_average_pooling=global_average_pooling) + control = ControlNet(control_model, global_average_pooling=global_average_pooling, load_device=load_device, manual_cast_dtype=manual_cast_dtype) return control class T2IAdapter(ControlBase): From 824e4935f53fdbda8f4608f511b4c2e8daf79dfa Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 12 Dec 2023 12:03:29 -0500 Subject: [PATCH 307/420] Add dtype parameter to VAE object. --- comfy/sd.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 8c056e4ea2f..220637a05d7 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -151,7 +151,7 @@ def get_key_patches(self): return self.patcher.get_key_patches() class VAE: - def __init__(self, sd=None, device=None, config=None): + def __init__(self, sd=None, device=None, config=None, dtype=None): if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format sd = diffusers_convert.convert_vae_state_dict(sd) @@ -188,7 +188,9 @@ def __init__(self, sd=None, device=None, config=None): device = model_management.vae_device() self.device = device offload_device = model_management.vae_offload_device() - self.vae_dtype = model_management.vae_dtype() + if dtype is None: + dtype = model_management.vae_dtype() + self.vae_dtype = dtype self.first_stage_model.to(self.vae_dtype) self.output_device = model_management.intermediate_device() From b454a67bb964fc20bac0354d009c1c811a289d89 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 12 Dec 2023 19:09:53 -0500 Subject: [PATCH 308/420] Support segmind vega model. 
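
Supporting Segmind Vega mostly means teaching the detector its unet
shape: a template config with a shallower transformer stack than SDXL
that detected checkpoints are matched against key by key. A toy sketch
of that matching step, assuming exact key/value comparison as in
supported_models_base.BASE.matches:

    def config_matches(template: dict, detected: dict) -> bool:
        # Every key the template pins down must agree with the config
        # inferred from the checkpoint's state dict.
        return all(detected.get(k) == v for k, v in template.items())

    vega_like = {"context_dim": 2048, "transformer_depth": [0, 0, 1, 1, 2, 2]}
    assert config_matches(vega_like, {**vega_like, "model_channels": 320})
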
--- comfy/model_detection.py | 8 +++++++-
 comfy/supported_models.py | 12 +++++++++++-
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/comfy/model_detection.py b/comfy/model_detection.py
index c682c3e1a18..e3af422a310 100644
--- a/comfy/model_detection.py
+++ b/comfy/model_detection.py
@@ -289,7 +289,13 @@ def unet_config_from_diffusers_unet(state_dict, dtype):
                  'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64,
                  'use_temporal_attention': False, 'use_temporal_resblock': False}

-    supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B]
+    Segmind_Vega = {'use_checkpoint': False, 'image_size': 32, 'out_channels': 4, 'use_spatial_transformer': True, 'legacy': False,
+                    'num_classes': 'sequential', 'adm_in_channels': 2816, 'dtype': dtype, 'in_channels': 4, 'model_channels': 320,
+                    'num_res_blocks': [2, 2, 2], 'transformer_depth': [0, 0, 1, 1, 2, 2], 'transformer_depth_output': [0, 0, 0, 1, 1, 1, 2, 2, 2],
+                    'channel_mult': [1, 2, 4], 'transformer_depth_middle': -1, 'use_linear_in_transformer': True, 'context_dim': 2048, 'num_head_channels': 64,
+                    'use_temporal_attention': False, 'use_temporal_resblock': False}
+
+    supported_models = [SDXL, SDXL_refiner, SD21, SD15, SD21_uncliph, SD21_unclipl, SDXL_mid_cnet, SDXL_small_cnet, SDXL_diffusers_inpaint, SSD_1B, Segmind_Vega]

     for unet_config in supported_models:
         matches = True
diff --git a/comfy/supported_models.py b/comfy/supported_models.py
index 455323b9629..2f2dee871a2 100644
--- a/comfy/supported_models.py
+++ b/comfy/supported_models.py
@@ -217,6 +217,16 @@ class SSD1B(SDXL):
         "use_temporal_attention": False,
     }

+class Segmind_Vega(SDXL):
+    unet_config = {
+        "model_channels": 320,
+        "use_linear_in_transformer": True,
+        "transformer_depth": [0, 0, 1, 1, 2, 2],
+        "context_dim": 2048,
+        "adm_in_channels": 2816,
+        "use_temporal_attention": False,
+    }
+
 class SVD_img2vid(supported_models_base.BASE):
     unet_config = {
         "model_channels": 320,
@@ -242,5 +252,5 @@ def get_model(self, state_dict, prefix="", device=None):
     def clip_target(self):
         return None

-models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B]
+models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega]
 models += [SVD_img2vid]

From 390078904c791c7c66c08478ed3d657b42ba7888 Mon Sep 17 00:00:00 2001
From: pythongosssss <125205205+pythongosssss@users.noreply.github.com>
Date: Wed, 13 Dec 2023 05:56:39 +0000
Subject: [PATCH 309/420] Group node fixes (#2259)

* Prevent cleaning graph state on undo/redo
* Remove pause rendering due to LG bug
* Fix crash on disconnected internal reroutes
* Fix widget inputs having incorrect order and values
* Fix initial primitive values on connect
* Basic support for rerouted converted inputs
* Populate primitive to reroute input
* Don't crash on bad primitive links
* Fix convert to group changing control value
* Reduce restrictions
* Fix random crash in tests

--- tests-ui/tests/groupNode.test.js | 134 ++++++++++++++++++++++++++--
 tests-ui/utils/ezgraph.js | 8 ++
 tests-ui/utils/index.js | 9 ++
 web/extensions/core/groupNode.js | 127 +++++++++++++++++++++-----
 web/extensions/core/rerouteNode.js | 1 +
 web/extensions/core/widgetInputs.js | 19 +++-
 web/scripts/app.js | 4 +-
 7 files changed, 275 insertions(+), 27 deletions(-)

diff --git a/tests-ui/tests/groupNode.test.js b/tests-ui/tests/groupNode.test.js
index dc9d4bd49af..625890a0909 
100644 --- a/tests-ui/tests/groupNode.test.js +++ b/tests-ui/tests/groupNode.test.js @@ -1,7 +1,7 @@ // @ts-check /// -const { start, createDefaultWorkflow } = require("../utils"); +const { start, createDefaultWorkflow, getNodeDef, checkBeforeAndAfterReload } = require("../utils"); const lg = require("../utils/litegraph"); describe("group node", () => { @@ -273,7 +273,7 @@ describe("group node", () => { let reroutes = []; let prevNode = nodes.ckpt; - for(let i = 0; i < 5; i++) { + for (let i = 0; i < 5; i++) { const reroute = ez.Reroute(); prevNode.outputs[0].connectTo(reroute.inputs[0]); prevNode = reroute; @@ -283,7 +283,7 @@ describe("group node", () => { const group = await convertToGroup(app, graph, "test", [...reroutes, ...Object.values(nodes)]); expect((await graph.toPrompt()).output).toEqual(getOutput()); - + group.menu["Convert to nodes"].call(); expect((await graph.toPrompt()).output).toEqual(getOutput()); }); @@ -407,12 +407,18 @@ describe("group node", () => { const decode = ez.VAEDecode(group2.outputs.LATENT, group2.outputs.VAE); const preview = ez.PreviewImage(decode.outputs[0]); - expect((await graph.toPrompt()).output).toEqual({ + const output = { [latent.id]: { inputs: { width: 512, height: 512, batch_size: 1 }, class_type: "EmptyLatentImage" }, [vae.id]: { inputs: { vae_name: "vae1.safetensors" }, class_type: "VAELoader" }, [decode.id]: { inputs: { samples: [latent.id + "", 0], vae: [vae.id + "", 0] }, class_type: "VAEDecode" }, [preview.id]: { inputs: { images: [decode.id + "", 0] }, class_type: "PreviewImage" }, - }); + }; + expect((await graph.toPrompt()).output).toEqual(output); + + // Ensure missing connections dont cause errors + group2.inputs.VAE.disconnect(); + delete output[decode.id].inputs.vae; + expect((await graph.toPrompt()).output).toEqual(output); }); test("displays generated image on group node", async () => { const { ez, graph, app } = await start(); @@ -673,6 +679,55 @@ describe("group node", () => { 2: { inputs: { text: "positive" }, class_type: "CLIPTextEncode" }, }); }); + test("correctly handles widget inputs", async () => { + const { ez, graph, app } = await start(); + const upscaleMethods = (await getNodeDef("ImageScaleBy")).input.required["upscale_method"][0]; + + const image = ez.LoadImage(); + const scale1 = ez.ImageScaleBy(image.outputs[0]); + const scale2 = ez.ImageScaleBy(image.outputs[0]); + const preview1 = ez.PreviewImage(scale1.outputs[0]); + const preview2 = ez.PreviewImage(scale2.outputs[0]); + scale1.widgets.upscale_method.value = upscaleMethods[1]; + scale1.widgets.upscale_method.convertToInput(); + + const group = await convertToGroup(app, graph, "test", [scale1, scale2]); + expect(group.inputs.length).toBe(3); + expect(group.inputs[0].input.type).toBe("IMAGE"); + expect(group.inputs[1].input.type).toBe("IMAGE"); + expect(group.inputs[2].input.type).toBe("COMBO"); + + // Ensure links are maintained + expect(group.inputs[0].connection?.originNode?.id).toBe(image.id); + expect(group.inputs[1].connection?.originNode?.id).toBe(image.id); + expect(group.inputs[2].connection).toBeFalsy(); + + // Ensure primitive gets correct type + const primitive = ez.PrimitiveNode(); + primitive.outputs[0].connectTo(group.inputs[2]); + expect(primitive.widgets.value.widget.options.values).toBe(upscaleMethods); + expect(primitive.widgets.value.value).toBe(upscaleMethods[1]); // Ensure value is copied + primitive.widgets.value.value = upscaleMethods[1]; + + await checkBeforeAndAfterReload(graph, async (r) => { + const scale1id = r ? 
`${group.id}:0` : scale1.id; + const scale2id = r ? `${group.id}:1` : scale2.id; + // Ensure widget value is applied to prompt + expect((await graph.toPrompt()).output).toStrictEqual({ + [image.id]: { inputs: { image: "example.png", upload: "image" }, class_type: "LoadImage" }, + [scale1id]: { + inputs: { upscale_method: upscaleMethods[1], scale_by: 1, image: [`${image.id}`, 0] }, + class_type: "ImageScaleBy", + }, + [scale2id]: { + inputs: { upscale_method: "nearest-exact", scale_by: 1, image: [`${image.id}`, 0] }, + class_type: "ImageScaleBy", + }, + [preview1.id]: { inputs: { images: [`${scale1id}`, 0] }, class_type: "PreviewImage" }, + [preview2.id]: { inputs: { images: [`${scale2id}`, 0] }, class_type: "PreviewImage" }, + }); + }); + }); test("adds widgets in node execution order", async () => { const { ez, graph, app } = await start(); const scale = ez.LatentUpscale(); @@ -846,4 +901,73 @@ describe("group node", () => { expect(p2.widgets.control_after_generate.value).toBe("randomize"); expect(p2.widgets.control_filter_list.value).toBe("/.+/"); }); + test("internal reroutes work with converted inputs and merge options", async () => { + const { ez, graph, app } = await start(); + const vae = ez.VAELoader(); + const latent = ez.EmptyLatentImage(); + const decode = ez.VAEDecode(latent.outputs.LATENT, vae.outputs.VAE); + const scale = ez.ImageScale(decode.outputs.IMAGE); + ez.PreviewImage(scale.outputs.IMAGE); + + const r1 = ez.Reroute(); + const r2 = ez.Reroute(); + + latent.widgets.width.value = 64; + latent.widgets.height.value = 128; + + latent.widgets.width.convertToInput(); + latent.widgets.height.convertToInput(); + latent.widgets.batch_size.convertToInput(); + + scale.widgets.width.convertToInput(); + scale.widgets.height.convertToInput(); + + r1.inputs[0].input.label = "hbw"; + r1.outputs[0].connectTo(latent.inputs.height); + r1.outputs[0].connectTo(latent.inputs.batch_size); + r1.outputs[0].connectTo(scale.inputs.width); + + r2.inputs[0].input.label = "wh"; + r2.outputs[0].connectTo(latent.inputs.width); + r2.outputs[0].connectTo(scale.inputs.height); + + const group = await convertToGroup(app, graph, "test", [r1, r2, latent, decode, scale]); + + expect(group.inputs[0].input.type).toBe("VAE"); + expect(group.inputs[1].input.type).toBe("INT"); + expect(group.inputs[2].input.type).toBe("INT"); + + const p1 = ez.PrimitiveNode(); + const p2 = ez.PrimitiveNode(); + p1.outputs[0].connectTo(group.inputs[1]); + p2.outputs[0].connectTo(group.inputs[2]); + + expect(p1.widgets.value.widget.options?.min).toBe(16); // width/height min + expect(p1.widgets.value.widget.options?.max).toBe(4096); // batch max + expect(p1.widgets.value.widget.options?.step).toBe(80); // width/height step * 10 + + expect(p2.widgets.value.widget.options?.min).toBe(16); // width/height min + expect(p2.widgets.value.widget.options?.max).toBe(8192); // width/height max + expect(p2.widgets.value.widget.options?.step).toBe(80); // width/height step * 10 + + expect(p1.widgets.value.value).toBe(128); + expect(p2.widgets.value.value).toBe(64); + + p1.widgets.value.value = 16; + p2.widgets.value.value = 32; + + await checkBeforeAndAfterReload(graph, async (r) => { + const id = (v) => (r ? 
`${group.id}:` : "") + v; + expect((await graph.toPrompt()).output).toStrictEqual({ + 1: { inputs: { vae_name: "vae1.safetensors" }, class_type: "VAELoader" }, + [id(2)]: { inputs: { width: 32, height: 16, batch_size: 16 }, class_type: "EmptyLatentImage" }, + [id(3)]: { inputs: { samples: [id(2), 0], vae: ["1", 0] }, class_type: "VAEDecode" }, + [id(4)]: { + inputs: { upscale_method: "nearest-exact", width: 16, height: 32, crop: "disabled", image: [id(3), 0] }, + class_type: "ImageScale", + }, + 5: { inputs: { images: [id(4), 0] }, class_type: "PreviewImage" }, + }); + }); + }); }); diff --git a/tests-ui/utils/ezgraph.js b/tests-ui/utils/ezgraph.js index 3101aa29297..8a55246ee3d 100644 --- a/tests-ui/utils/ezgraph.js +++ b/tests-ui/utils/ezgraph.js @@ -78,6 +78,14 @@ export class EzInput extends EzSlot { this.input = input; } + get connection() { + const link = this.node.node.inputs?.[this.index]?.link; + if (link == null) { + return null; + } + return new EzConnection(this.node.app, this.node.app.graph.links[link]); + } + disconnect() { this.node.node.disconnectInput(this.index); } diff --git a/tests-ui/utils/index.js b/tests-ui/utils/index.js index 3a018f566e4..6a08e8594e9 100644 --- a/tests-ui/utils/index.js +++ b/tests-ui/utils/index.js @@ -104,3 +104,12 @@ export function createDefaultWorkflow(ez, graph) { return { ckpt, pos, neg, empty, sampler, decode, save }; } + +export async function getNodeDefs() { + const { api } = require("../../web/scripts/api"); + return api.getNodeDefs(); +} + +export async function getNodeDef(nodeId) { + return (await getNodeDefs())[nodeId]; +} \ No newline at end of file diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 9a1d9b20760..dc962ac2465 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -174,6 +174,11 @@ export class GroupNodeConfig { node.index = i; this.processNode(node, seenInputs, seenOutputs); } + + for (const p of this.#convertedToProcess) { + p(); + } + this.#convertedToProcess = null; await app.registerNodeDef("workflow/" + this.name, this.nodeDef); } @@ -192,7 +197,10 @@ export class GroupNodeConfig { if (!this.linksFrom[sourceNodeId]) { this.linksFrom[sourceNodeId] = {}; } - this.linksFrom[sourceNodeId][sourceNodeSlot] = l; + if (!this.linksFrom[sourceNodeId][sourceNodeSlot]) { + this.linksFrom[sourceNodeId][sourceNodeSlot] = []; + } + this.linksFrom[sourceNodeId][sourceNodeSlot].push(l); if (!this.linksTo[targetNodeId]) { this.linksTo[targetNodeId] = {}; @@ -230,11 +238,11 @@ export class GroupNodeConfig { // Skip as its not linked if (!linksFrom) return; - let type = linksFrom["0"][5]; + let type = linksFrom["0"][0][5]; if (type === "COMBO") { // Use the array items const source = node.outputs[0].widget.name; - const fromTypeName = this.nodeData.nodes[linksFrom["0"][2]].type; + const fromTypeName = this.nodeData.nodes[linksFrom["0"][0][2]].type; const fromType = globalDefs[fromTypeName]; const input = fromType.input.required[source] ?? 
fromType.input.optional[source]; type = input[0]; @@ -258,10 +266,33 @@ export class GroupNodeConfig { return null; } + let config = {}; let rerouteType = "*"; if (linksFrom) { - const [, , id, slot] = linksFrom["0"]; - rerouteType = this.nodeData.nodes[id].inputs[slot].type; + for (const [, , id, slot] of linksFrom["0"]) { + const node = this.nodeData.nodes[id]; + const input = node.inputs[slot]; + if (rerouteType === "*") { + rerouteType = input.type; + } + if (input.widget) { + const targetDef = globalDefs[node.type]; + const targetWidget = + targetDef.input.required[input.widget.name] ?? targetDef.input.optional[input.widget.name]; + + const widget = [targetWidget[0], config]; + const res = mergeIfValid( + { + widget, + }, + targetWidget, + false, + null, + widget + ); + config = res?.customConfig ?? config; + } + } } else if (linksTo) { const [id, slot] = linksTo["0"]; rerouteType = this.nodeData.nodes[id].outputs[slot].type; @@ -282,10 +313,11 @@ export class GroupNodeConfig { } } + config.forceInput = true; return { input: { required: { - [rerouteType]: [rerouteType, {}], + [rerouteType]: [rerouteType, config], }, }, output: [rerouteType], @@ -420,10 +452,18 @@ export class GroupNodeConfig { defaultInput: true, }); this.nodeDef.input.required[name] = config; + this.newToOldWidgetMap[name] = { node, inputName }; + + if (!this.oldToNewWidgetMap[node.index]) { + this.oldToNewWidgetMap[node.index] = {}; + } + this.oldToNewWidgetMap[node.index][inputName] = name; + inputMap[slots.length + i] = this.inputCount++; } } + #convertedToProcess = []; processNodeInputs(node, seenInputs, inputs) { const inputMapping = []; @@ -434,7 +474,11 @@ export class GroupNodeConfig { const linksTo = this.linksTo[node.index] ?? {}; const inputMap = (this.oldToNewInputMap[node.index] = {}); this.processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs); - this.processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs); + + // Converted inputs have to be processed after all other nodes as they'll be at the end of the list + this.#convertedToProcess.push(() => + this.processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs) + ); return inputMapping; } @@ -597,11 +641,15 @@ export class GroupNodeHandler { const output = this.groupData.newToOldOutputMap[link.origin_slot]; let innerNode = this.innerNodes[output.node.index]; let l; - while (innerNode.type === "Reroute") { + while (innerNode?.type === "Reroute") { l = innerNode.getInputLink(0); innerNode = innerNode.getInputNode(0); } + if (!innerNode) { + return null; + } + if (l && GroupNodeHandler.isGroupNode(innerNode)) { return innerNode.updateLink(l); } @@ -669,6 +717,8 @@ export class GroupNodeHandler { top = newNode.pos[1]; } + if (!newNode.widgets) continue; + const map = this.groupData.oldToNewWidgetMap[innerNode.index]; if (map) { const widgets = Object.keys(map); @@ -725,7 +775,7 @@ export class GroupNodeHandler { } }; - const reconnectOutputs = () => { + const reconnectOutputs = (selectedIds) => { for (let groupOutputId = 0; groupOutputId < node.outputs?.length; groupOutputId++) { const output = node.outputs[groupOutputId]; if (!output.links) continue; @@ -865,7 +915,7 @@ export class GroupNodeHandler { if (innerNode.type === "PrimitiveNode") { innerNode.primitiveValue = newValue; const primitiveLinked = this.groupData.primitiveToWidget[old.node.index]; - for (const linked of primitiveLinked) { + for (const linked of primitiveLinked ?? 
[]) { const node = this.innerNodes[linked.nodeId]; const widget = node.widgets.find((w) => w.name === linked.inputName); @@ -874,6 +924,18 @@ export class GroupNodeHandler { } } continue; + } else if (innerNode.type === "Reroute") { + const rerouteLinks = this.groupData.linksFrom[old.node.index]; + for (const [_, , targetNodeId, targetSlot] of rerouteLinks["0"]) { + const node = this.innerNodes[targetNodeId]; + const input = node.inputs[targetSlot]; + if (input.widget) { + const widget = node.widgets?.find((w) => w.name === input.widget.name); + if (widget) { + widget.value = newValue; + } + } + } } const widget = innerNode.widgets?.find((w) => w.name === old.inputName); @@ -901,33 +963,58 @@ export class GroupNodeHandler { this.node.widgets[targetWidgetIndex + i].value = primitiveNode.widgets[i].value; } } + return true; + } + + populateReroute(node, nodeId, map) { + if (node.type !== "Reroute") return; + + const link = this.groupData.linksFrom[nodeId]?.[0]?.[0]; + if (!link) return; + const [, , targetNodeId, targetNodeSlot] = link; + const targetNode = this.groupData.nodeData.nodes[targetNodeId]; + const inputs = targetNode.inputs; + const targetWidget = inputs?.[targetNodeSlot].widget; + if (!targetWidget) return; + + const offset = inputs.length - (targetNode.widgets_values?.length ?? 0); + const v = targetNode.widgets_values?.[targetNodeSlot - offset]; + if (v == null) return; + + const widgetName = Object.values(map)[0]; + const widget = this.node.widgets.find(w => w.name === widgetName); + if(widget) { + widget.value = v; + } } + populateWidgets() { + if (!this.node.widgets) return; + for (let nodeId = 0; nodeId < this.groupData.nodeData.nodes.length; nodeId++) { const node = this.groupData.nodeData.nodes[nodeId]; - - if (!node.widgets_values?.length) continue; - - const map = this.groupData.oldToNewWidgetMap[nodeId]; + const map = this.groupData.oldToNewWidgetMap[nodeId] ?? {}; const widgets = Object.keys(map); + if (!node.widgets_values?.length) { + // special handling for populating values into reroutes + // this allows primitives connect to them to pick up the correct value + this.populateReroute(node, nodeId, map); + continue; + } + let linkedShift = 0; for (let i = 0; i < widgets.length; i++) { const oldName = widgets[i]; const newName = map[oldName]; const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName); const mainWidget = this.node.widgets[widgetIndex]; - if (!newName) { - // New name will be null if its a converted widget - this.populatePrimitive(node, nodeId, oldName, i, linkedShift); - + if (this.populatePrimitive(node, nodeId, oldName, i, linkedShift)) { // Find the inner widget and shift by the number of linked widgets as they will have been removed too const innerWidget = this.innerNodes[nodeId].widgets?.find((w) => w.name === oldName); linkedShift += innerWidget.linkedWidgets?.length ?? 
0; - continue; } - if (widgetIndex === -1) { continue; } diff --git a/web/extensions/core/rerouteNode.js b/web/extensions/core/rerouteNode.js index cfa952f3c47..4feff91e50e 100644 --- a/web/extensions/core/rerouteNode.js +++ b/web/extensions/core/rerouteNode.js @@ -54,6 +54,7 @@ app.registerExtension({ const linkId = currentNode.inputs[0].link; if (linkId !== null) { const link = app.graph.links[linkId]; + if (!link) return; const node = app.graph.getNodeById(link.origin_id); const type = node.constructor.type; if (type === "Reroute") { diff --git a/web/extensions/core/widgetInputs.js b/web/extensions/core/widgetInputs.js index 865db4923f9..3f1c1f8c126 100644 --- a/web/extensions/core/widgetInputs.js +++ b/web/extensions/core/widgetInputs.js @@ -180,7 +180,7 @@ export function mergeIfValid(output, config2, forceUpdate, recreateWidget, confi const isNumber = config1[0] === "INT" || config1[0] === "FLOAT"; for (const k of keys.values()) { - if (k !== "default" && k !== "forceInput" && k !== "defaultInput") { + if (k !== "default" && k !== "forceInput" && k !== "defaultInput" && k !== "control_after_generate" && k !== "multiline") { let v1 = config1[1][k]; let v2 = config2[1]?.[k]; @@ -633,6 +633,14 @@ app.registerExtension({ } } + // Restore any saved control values + const controlValues = this.controlValues; + if(this.lastType === this.widgets[0].type && controlValues?.length === this.widgets.length - 1) { + for(let i = 0; i < controlValues.length; i++) { + this.widgets[i + 1].value = controlValues[i]; + } + } + // When our value changes, update other widgets to reflect our changes // e.g. so LoadImage shows correct image const callback = widget.callback; @@ -721,6 +729,15 @@ app.registerExtension({ w.onRemove(); } } + + // Temporarily store the current values in case the node is being recreated + // e.g. by group node conversion + this.controlValues = []; + this.lastType = this.widgets[0]?.type; + for(let i = 1; i < this.widgets.length; i++) { + this.controlValues.push(this.widgets[i].value); + } + setTimeout(() => { delete this.lastType; delete this.controlValues }, 15); this.widgets.length = 0; } } diff --git a/web/scripts/app.js b/web/scripts/app.js index d2a6f4de425..62169abfb82 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1774,7 +1774,9 @@ export class ComfyApp { if (parent?.updateLink) { link = parent.updateLink(link); } - inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)]; + if (link) { + inputs[node.inputs[i].name] = [String(link.origin_id), parseInt(link.origin_slot)]; + } } } } From 6761233e9dd875002c4ff9dac4574828ac564156 Mon Sep 17 00:00:00 2001 From: Rafie Walker Date: Wed, 13 Dec 2023 21:52:11 +0100 Subject: [PATCH 310/420] Implement Self-Attention Guidance (#2201) * First SAG test * need to put extra options on the model instead of patcher * no errors and results seem not-broken * Use @ashen-uncensored formula, which works better!!! * Fix a crash when using weird resolutions. 
Remove an unnecessary UNet call * Improve comments, optimize memory in blur routine * SAG works with sampler_cfg_function --- comfy/samplers.py | 75 ++++++++++++++++++++++--- comfy_extras/nodes_sag.py | 115 ++++++++++++++++++++++++++++++++++++++ nodes.py | 1 + 3 files changed, 182 insertions(+), 9 deletions(-) create mode 100644 comfy_extras/nodes_sag.py diff --git a/comfy/samplers.py b/comfy/samplers.py index ffc1fe3acb8..1cdad736d76 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -1,6 +1,7 @@ from .k_diffusion import sampling as k_diffusion_sampling from .extra_samplers import uni_pc import torch +import torch.nn.functional as F import enum from comfy import model_management import math @@ -60,10 +61,10 @@ def get_area_and_mult(conds, x_in, timestep_in): for t in range(rr): mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) - conditionning = {} + conditioning = {} model_conds = conds["model_conds"] for c in model_conds: - conditionning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) + conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) control = None if 'control' in conds: @@ -82,7 +83,7 @@ def get_area_and_mult(conds, x_in, timestep_in): patches['middle_patch'] = [gligen_patch] - return (input_x, mult, conditionning, area, control, patches) + return (input_x, mult, conditioning, area, control, patches) def cond_equal_size(c1, c2): if c1 is c2: @@ -246,15 +247,71 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): return out_cond, out_uncond - if math.isclose(cond_scale, 1.0): + # if we're doing SAG, we still need to do uncond guidance, even though the cond and uncond will cancel out. + if math.isclose(cond_scale, 1.0) and "sag" not in model_options: uncond = None - cond, uncond = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options) + cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options) + cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale if "sampler_cfg_function" in model_options: - args = {"cond": x - cond, "uncond": x - uncond, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} - return x - model_options["sampler_cfg_function"](args) - else: - return uncond + (cond - uncond) * cond_scale + args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} + cfg_result = x - model_options["sampler_cfg_function"](args) + + if "sag" in model_options: + assert uncond is not None, "SAG requires uncond guidance" + sag_scale = model_options["sag_scale"] + sag_sigma = model_options["sag_sigma"] + sag_threshold = model_options.get("sag_threshold", 1.0) + + # these methods are added by the sag patcher + uncond_attn = model.get_attn_scores() + mid_shape = model.get_mid_block_shape() + # create the adversarially blurred image + degraded = create_blur_map(uncond_pred, uncond_attn, mid_shape, sag_sigma, sag_threshold) + degraded_noised = degraded + x - uncond_pred + # call into the UNet + (sag, _) = calc_cond_uncond_batch(model, uncond, None, degraded_noised, timestep, model_options) + cfg_result += (degraded - sag) * sag_scale + return cfg_result + +def create_blur_map(x0, attn, mid_shape, sigma=3.0, threshold=1.0): + # reshape and GAP the attention map + _, hw1, hw2 = attn.shape + b, _, lh, lw = x0.shape + attn = attn.reshape(b, -1, hw1, hw2) + # Global Average Pool + mask = 
attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold + # Reshape + mask = ( + mask.reshape(b, *mid_shape) + .unsqueeze(1) + .type(attn.dtype) + ) + # Upsample + mask = F.interpolate(mask, (lh, lw)) + + blurred = gaussian_blur_2d(x0, kernel_size=9, sigma=sigma) + blurred = blurred * mask + x0 * (1 - mask) + return blurred + +def gaussian_blur_2d(img, kernel_size, sigma): + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) + + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + + x_kernel = pdf / pdf.sum() + x_kernel = x_kernel.to(device=img.device, dtype=img.dtype) + + kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) + kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) + + padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] + + img = F.pad(img, padding, mode="reflect") + img = F.conv2d(img, kernel2d, groups=img.shape[-3]) + return img class CFGNoisePredictor(torch.nn.Module): def __init__(self, model): diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py new file mode 100644 index 00000000000..1ec0c93ac6c --- /dev/null +++ b/comfy_extras/nodes_sag.py @@ -0,0 +1,115 @@ +import torch +from torch import einsum +from einops import rearrange, repeat +import os +from comfy.ldm.modules.attention import optimized_attention, _ATTN_PRECISION + +# from comfy/ldm/modules/attention.py +# but modified to return attention scores as well as output +def attention_basic_with_sim(q, k, v, heads, mask=None): + b, _, dim_head = q.shape + dim_head //= heads + scale = dim_head ** -0.5 + + h = heads + q, k, v = map( + lambda t: t.unsqueeze(3) + .reshape(b, -1, heads, dim_head) + .permute(0, 2, 1, 3) + .reshape(b * heads, -1, dim_head) + .contiguous(), + (q, k, v), + ) + + # force cast to fp32 to avoid overflowing + if _ATTN_PRECISION =="fp32": + with torch.autocast(enabled=False, device_type = 'cuda'): + q, k = q.float(), k.float() + sim = einsum('b i d, b j d -> b i j', q, k) * scale + else: + sim = einsum('b i d, b j d -> b i j', q, k) * scale + + del q, k + + if mask is not None: + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + sim = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v) + out = ( + out.unsqueeze(0) + .reshape(b, heads, -1, dim_head) + .permute(0, 2, 1, 3) + .reshape(b, -1, heads * dim_head) + ) + return (out, sim) + +class SagNode: + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.1}), + "blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, scale, blur_sigma): + m = model.clone() + # set extra options on the model + m.model_options["sag"] = True + m.model_options["sag_scale"] = scale + m.model_options["sag_sigma"] = blur_sigma + + attn_scores = None + mid_block_shape = None + m.model.get_attn_scores = lambda: attn_scores + m.model.get_mid_block_shape = lambda: mid_block_shape + + # TODO: make this work properly with chunked batches + # currently, we can only save the attn from one UNet call + def attn_and_record(q, k, v, extra_options): + nonlocal attn_scores + # if uncond, save the attention scores + heads = extra_options["n_heads"] + cond_or_uncond = extra_options["cond_or_uncond"] + b = q.shape[0] // len(cond_or_uncond) + if 1 in cond_or_uncond: + uncond_index = cond_or_uncond.index(1) + # do the entire attention operation, but save the attention scores to attn_scores + (out, sim) = attention_basic_with_sim(q, k, v, heads=heads) + # when using a higher batch size, I BELIEVE the result batch dimension is [uc1, ... ucn, c1, ... cn] + n_slices = heads * b + attn_scores = sim[n_slices * uncond_index:n_slices * (uncond_index+1)] + return out + else: + return optimized_attention(q, k, v, heads=heads) + + # from diffusers: + # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch + def set_model_patch_replace(patch, name, key): + to = m.model_options["transformer_options"] + if "patches_replace" not in to: + to["patches_replace"] = {} + if name not in to["patches_replace"]: + to["patches_replace"][name] = {} + to["patches_replace"][name][key] = patch + set_model_patch_replace(attn_and_record, "attn1", ("middle", 0, 0)) + # from diffusers: + # unet.mid_block.attentions[0].register_forward_hook() + def forward_hook(m, inp, out): + nonlocal mid_block_shape + mid_block_shape = out[0].shape[-2:] + m.model.diffusion_model.middle_block[0].register_forward_hook(forward_hook) + return (m, ) + +NODE_CLASS_MAPPINGS = { + "Self-Attention Guidance": SagNode, +} diff --git a/nodes.py b/nodes.py index db96e0e2d66..3d24750cbfa 100644 --- a/nodes.py +++ b/nodes.py @@ -1867,6 +1867,7 @@ def init_custom_nodes(): "nodes_model_downscale.py", "nodes_images.py", "nodes_video_model.py", + "nodes_sag.py", ] for node_file in extras_files: From ba04a87d104ca73d8ed8e8423706edcdf5e209a8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 13 Dec 2023 16:10:03 -0500 Subject: [PATCH 311/420] Refactor and improve the sag node. 
Moved all the sag related code to comfy_extras/nodes_sag.py --- comfy/model_patcher.py | 19 +- comfy/samplers.py | 527 +++++++++++++++++--------------------- comfy_extras/nodes_sag.py | 103 ++++++-- 3 files changed, 331 insertions(+), 318 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 55ca913ec78..e0acdc961ee 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -61,6 +61,9 @@ def set_model_sampler_cfg_function(self, sampler_cfg_function): else: self.model_options["sampler_cfg_function"] = sampler_cfg_function + def set_model_sampler_post_cfg_function(self, post_cfg_function): + self.model_options["sampler_post_cfg_function"] = self.model_options.get("sampler_post_cfg_function", []) + [post_cfg_function] + def set_model_unet_function_wrapper(self, unet_wrapper_function): self.model_options["model_function_wrapper"] = unet_wrapper_function @@ -70,13 +73,17 @@ def set_model_patch(self, patch, name): to["patches"] = {} to["patches"][name] = to["patches"].get(name, []) + [patch] - def set_model_patch_replace(self, patch, name, block_name, number): + def set_model_patch_replace(self, patch, name, block_name, number, transformer_index=None): to = self.model_options["transformer_options"] if "patches_replace" not in to: to["patches_replace"] = {} if name not in to["patches_replace"]: to["patches_replace"][name] = {} - to["patches_replace"][name][(block_name, number)] = patch + if transformer_index is not None: + block = (block_name, number, transformer_index) + else: + block = (block_name, number) + to["patches_replace"][name][block] = patch def set_model_attn1_patch(self, patch): self.set_model_patch(patch, "attn1_patch") @@ -84,11 +91,11 @@ def set_model_attn1_patch(self, patch): def set_model_attn2_patch(self, patch): self.set_model_patch(patch, "attn2_patch") - def set_model_attn1_replace(self, patch, block_name, number): - self.set_model_patch_replace(patch, "attn1", block_name, number) + def set_model_attn1_replace(self, patch, block_name, number, transformer_index=None): + self.set_model_patch_replace(patch, "attn1", block_name, number, transformer_index) - def set_model_attn2_replace(self, patch, block_name, number): - self.set_model_patch_replace(patch, "attn2", block_name, number) + def set_model_attn2_replace(self, patch, block_name, number, transformer_index=None): + self.set_model_patch_replace(patch, "attn2", block_name, number, transformer_index) def set_model_attn1_output_patch(self, patch): self.set_model_patch(patch, "attn1_output_patch") diff --git a/comfy/samplers.py b/comfy/samplers.py index 1cdad736d76..106e7287656 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -1,7 +1,6 @@ from .k_diffusion import sampling as k_diffusion_sampling from .extra_samplers import uni_pc import torch -import torch.nn.functional as F import enum from comfy import model_management import math @@ -9,309 +8,259 @@ import comfy.utils import comfy.conds +def get_area_and_mult(conds, x_in, timestep_in): + area = (x_in.shape[2], x_in.shape[3], 0, 0) + strength = 1.0 + + if 'timestep_start' in conds: + timestep_start = conds['timestep_start'] + if timestep_in[0] > timestep_start: + return None + if 'timestep_end' in conds: + timestep_end = conds['timestep_end'] + if timestep_in[0] < timestep_end: + return None + if 'area' in conds: + area = conds['area'] + if 'strength' in conds: + strength = conds['strength'] + + input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] + if 'mask' in conds: + # Scale the mask to the size of the input 
+ # The mask should have been resized as we began the sampling process + mask_strength = 1.0 + if "mask_strength" in conds: + mask_strength = conds["mask_strength"] + mask = conds['mask'] + assert(mask.shape[1] == x_in.shape[2]) + assert(mask.shape[2] == x_in.shape[3]) + mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength + mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1) + else: + mask = torch.ones_like(input_x) + mult = mask * strength + + if 'mask' not in conds: + rr = 8 + if area[2] != 0: + for t in range(rr): + mult[:,:,t:1+t,:] *= ((1.0/rr) * (t + 1)) + if (area[0] + area[2]) < x_in.shape[2]: + for t in range(rr): + mult[:,:,area[0] - 1 - t:area[0] - t,:] *= ((1.0/rr) * (t + 1)) + if area[3] != 0: + for t in range(rr): + mult[:,:,:,t:1+t] *= ((1.0/rr) * (t + 1)) + if (area[1] + area[3]) < x_in.shape[3]: + for t in range(rr): + mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) + + conditioning = {} + model_conds = conds["model_conds"] + for c in model_conds: + conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) + + control = None + if 'control' in conds: + control = conds['control'] + + patches = None + if 'gligen' in conds: + gligen = conds['gligen'] + patches = {} + gligen_type = gligen[0] + gligen_model = gligen[1] + if gligen_type == "position": + gligen_patch = gligen_model.model.set_position(input_x.shape, gligen[2], input_x.device) + else: + gligen_patch = gligen_model.model.set_empty(input_x.shape, input_x.device) + + patches['middle_patch'] = [gligen_patch] + + return (input_x, mult, conditioning, area, control, patches) + +def cond_equal_size(c1, c2): + if c1 is c2: + return True + if c1.keys() != c2.keys(): + return False + for k in c1: + if not c1[k].can_concat(c2[k]): + return False + return True + +def can_concat_cond(c1, c2): + if c1[0].shape != c2[0].shape: + return False + + #control + if (c1[4] is None) != (c2[4] is None): + return False + if c1[4] is not None: + if c1[4] is not c2[4]: + return False + + #patches + if (c1[5] is None) != (c2[5] is None): + return False + if (c1[5] is not None): + if c1[5] is not c2[5]: + return False + + return cond_equal_size(c1[2], c2[2]) + +def cond_cat(c_list): + c_crossattn = [] + c_concat = [] + c_adm = [] + crossattn_max_len = 0 + + temp = {} + for x in c_list: + for k in x: + cur = temp.get(k, []) + cur.append(x[k]) + temp[k] = cur + + out = {} + for k in temp: + conds = temp[k] + out[k] = conds[0].concat(conds[1:]) + + return out + +def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): + out_cond = torch.zeros_like(x_in) + out_count = torch.ones_like(x_in) * 1e-37 + + out_uncond = torch.zeros_like(x_in) + out_uncond_count = torch.ones_like(x_in) * 1e-37 + + COND = 0 + UNCOND = 1 + + to_run = [] + for x in cond: + p = get_area_and_mult(x, x_in, timestep) + if p is None: + continue -#The main sampling function shared by all the samplers -#Returns denoised -def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): - def get_area_and_mult(conds, x_in, timestep_in): - area = (x_in.shape[2], x_in.shape[3], 0, 0) - strength = 1.0 - - if 'timestep_start' in conds: - timestep_start = conds['timestep_start'] - if timestep_in[0] > timestep_start: - return None - if 'timestep_end' in conds: - timestep_end = conds['timestep_end'] - if timestep_in[0] < timestep_end: - return None - if 'area' in conds: - area = conds['area'] - if 'strength' 
in conds: - strength = conds['strength'] - - input_x = x_in[:,:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] - if 'mask' in conds: - # Scale the mask to the size of the input - # The mask should have been resized as we began the sampling process - mask_strength = 1.0 - if "mask_strength" in conds: - mask_strength = conds["mask_strength"] - mask = conds['mask'] - assert(mask.shape[1] == x_in.shape[2]) - assert(mask.shape[2] == x_in.shape[3]) - mask = mask[:,area[2]:area[0] + area[2],area[3]:area[1] + area[3]] * mask_strength - mask = mask.unsqueeze(1).repeat(input_x.shape[0] // mask.shape[0], input_x.shape[1], 1, 1) - else: - mask = torch.ones_like(input_x) - mult = mask * strength - - if 'mask' not in conds: - rr = 8 - if area[2] != 0: - for t in range(rr): - mult[:,:,t:1+t,:] *= ((1.0/rr) * (t + 1)) - if (area[0] + area[2]) < x_in.shape[2]: - for t in range(rr): - mult[:,:,area[0] - 1 - t:area[0] - t,:] *= ((1.0/rr) * (t + 1)) - if area[3] != 0: - for t in range(rr): - mult[:,:,:,t:1+t] *= ((1.0/rr) * (t + 1)) - if (area[1] + area[3]) < x_in.shape[3]: - for t in range(rr): - mult[:,:,:,area[1] - 1 - t:area[1] - t] *= ((1.0/rr) * (t + 1)) - - conditioning = {} - model_conds = conds["model_conds"] - for c in model_conds: - conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) - - control = None - if 'control' in conds: - control = conds['control'] - - patches = None - if 'gligen' in conds: - gligen = conds['gligen'] - patches = {} - gligen_type = gligen[0] - gligen_model = gligen[1] - if gligen_type == "position": - gligen_patch = gligen_model.model.set_position(input_x.shape, gligen[2], input_x.device) - else: - gligen_patch = gligen_model.model.set_empty(input_x.shape, input_x.device) - - patches['middle_patch'] = [gligen_patch] - - return (input_x, mult, conditioning, area, control, patches) - - def cond_equal_size(c1, c2): - if c1 is c2: - return True - if c1.keys() != c2.keys(): - return False - for k in c1: - if not c1[k].can_concat(c2[k]): - return False - return True - - def can_concat_cond(c1, c2): - if c1[0].shape != c2[0].shape: - return False - - #control - if (c1[4] is None) != (c2[4] is None): - return False - if c1[4] is not None: - if c1[4] is not c2[4]: - return False - - #patches - if (c1[5] is None) != (c2[5] is None): - return False - if (c1[5] is not None): - if c1[5] is not c2[5]: - return False - - return cond_equal_size(c1[2], c2[2]) - - def cond_cat(c_list): - c_crossattn = [] - c_concat = [] - c_adm = [] - crossattn_max_len = 0 - - temp = {} - for x in c_list: - for k in x: - cur = temp.get(k, []) - cur.append(x[k]) - temp[k] = cur - - out = {} - for k in temp: - conds = temp[k] - out[k] = conds[0].concat(conds[1:]) - - return out - - def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): - out_cond = torch.zeros_like(x_in) - out_count = torch.ones_like(x_in) * 1e-37 - - out_uncond = torch.zeros_like(x_in) - out_uncond_count = torch.ones_like(x_in) * 1e-37 - - COND = 0 - UNCOND = 1 - - to_run = [] - for x in cond: - p = get_area_and_mult(x, x_in, timestep) - if p is None: - continue - - to_run += [(p, COND)] - if uncond is not None: - for x in uncond: - p = get_area_and_mult(x, x_in, timestep) - if p is None: - continue - - to_run += [(p, UNCOND)] - - while len(to_run) > 0: - first = to_run[0] - first_shape = first[0][0].shape - to_batch_temp = [] - for x in range(len(to_run)): - if can_concat_cond(to_run[x][0], first[0]): - to_batch_temp += [x] - - to_batch_temp.reverse() - to_batch 
= to_batch_temp[:1] - - free_memory = model_management.get_free_memory(x_in.device) - for i in range(1, len(to_batch_temp) + 1): - batch_amount = to_batch_temp[:len(to_batch_temp)//i] - input_shape = [len(batch_amount) * first_shape[0]] + list(first_shape)[1:] - if model.memory_required(input_shape) < free_memory: - to_batch = batch_amount - break - - input_x = [] - mult = [] - c = [] - cond_or_uncond = [] - area = [] - control = None - patches = None - for x in to_batch: - o = to_run.pop(x) - p = o[0] - input_x += [p[0]] - mult += [p[1]] - c += [p[2]] - area += [p[3]] - cond_or_uncond += [o[1]] - control = p[4] - patches = p[5] - - batch_chunks = len(cond_or_uncond) - input_x = torch.cat(input_x) - c = cond_cat(c) - timestep_ = torch.cat([timestep] * batch_chunks) - - if control is not None: - c['control'] = control.get_control(input_x, timestep_, c, len(cond_or_uncond)) - - transformer_options = {} - if 'transformer_options' in model_options: - transformer_options = model_options['transformer_options'].copy() - - if patches is not None: - if "patches" in transformer_options: - cur_patches = transformer_options["patches"].copy() - for p in patches: - if p in cur_patches: - cur_patches[p] = cur_patches[p] + patches[p] - else: - cur_patches[p] = patches[p] + to_run += [(p, COND)] + if uncond is not None: + for x in uncond: + p = get_area_and_mult(x, x_in, timestep) + if p is None: + continue + + to_run += [(p, UNCOND)] + + while len(to_run) > 0: + first = to_run[0] + first_shape = first[0][0].shape + to_batch_temp = [] + for x in range(len(to_run)): + if can_concat_cond(to_run[x][0], first[0]): + to_batch_temp += [x] + + to_batch_temp.reverse() + to_batch = to_batch_temp[:1] + + free_memory = model_management.get_free_memory(x_in.device) + for i in range(1, len(to_batch_temp) + 1): + batch_amount = to_batch_temp[:len(to_batch_temp)//i] + input_shape = [len(batch_amount) * first_shape[0]] + list(first_shape)[1:] + if model.memory_required(input_shape) < free_memory: + to_batch = batch_amount + break + + input_x = [] + mult = [] + c = [] + cond_or_uncond = [] + area = [] + control = None + patches = None + for x in to_batch: + o = to_run.pop(x) + p = o[0] + input_x += [p[0]] + mult += [p[1]] + c += [p[2]] + area += [p[3]] + cond_or_uncond += [o[1]] + control = p[4] + patches = p[5] + + batch_chunks = len(cond_or_uncond) + input_x = torch.cat(input_x) + c = cond_cat(c) + timestep_ = torch.cat([timestep] * batch_chunks) + + if control is not None: + c['control'] = control.get_control(input_x, timestep_, c, len(cond_or_uncond)) + + transformer_options = {} + if 'transformer_options' in model_options: + transformer_options = model_options['transformer_options'].copy() + + if patches is not None: + if "patches" in transformer_options: + cur_patches = transformer_options["patches"].copy() + for p in patches: + if p in cur_patches: + cur_patches[p] = cur_patches[p] + patches[p] else: - transformer_options["patches"] = patches - - transformer_options["cond_or_uncond"] = cond_or_uncond[:] - transformer_options["sigmas"] = timestep + cur_patches[p] = patches[p] + else: + transformer_options["patches"] = patches - c['transformer_options'] = transformer_options + transformer_options["cond_or_uncond"] = cond_or_uncond[:] + transformer_options["sigmas"] = timestep - if 'model_function_wrapper' in model_options: - output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks) - else: - output = 
model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) - del input_x + c['transformer_options'] = transformer_options - for o in range(batch_chunks): - if cond_or_uncond[o] == COND: - out_cond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o] - out_count[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += mult[o] - else: - out_uncond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o] - out_uncond_count[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += mult[o] - del mult + if 'model_function_wrapper' in model_options: + output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks) + else: + output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks) + del input_x - out_cond /= out_count - del out_count - out_uncond /= out_uncond_count - del out_uncond_count - return out_cond, out_uncond + for o in range(batch_chunks): + if cond_or_uncond[o] == COND: + out_cond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o] + out_count[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += mult[o] + else: + out_uncond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o] + out_uncond_count[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += mult[o] + del mult + out_cond /= out_count + del out_count + out_uncond /= out_uncond_count + del out_uncond_count + return out_cond, out_uncond - # if we're doing SAG, we still need to do uncond guidance, even though the cond and uncond will cancel out. 
- if math.isclose(cond_scale, 1.0) and "sag" not in model_options: - uncond = None +#The main sampling function shared by all the samplers +#Returns denoised +def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): + if math.isclose(cond_scale, 1.0): + uncond_ = None + else: + uncond_ = uncond - cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond, x, timestep, model_options) + cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options) cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale if "sampler_cfg_function" in model_options: args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} cfg_result = x - model_options["sampler_cfg_function"](args) - if "sag" in model_options: - assert uncond is not None, "SAG requires uncond guidance" - sag_scale = model_options["sag_scale"] - sag_sigma = model_options["sag_sigma"] - sag_threshold = model_options.get("sag_threshold", 1.0) - - # these methods are added by the sag patcher - uncond_attn = model.get_attn_scores() - mid_shape = model.get_mid_block_shape() - # create the adversarially blurred image - degraded = create_blur_map(uncond_pred, uncond_attn, mid_shape, sag_sigma, sag_threshold) - degraded_noised = degraded + x - uncond_pred - # call into the UNet - (sag, _) = calc_cond_uncond_batch(model, uncond, None, degraded_noised, timestep, model_options) - cfg_result += (degraded - sag) * sag_scale - return cfg_result - -def create_blur_map(x0, attn, mid_shape, sigma=3.0, threshold=1.0): - # reshape and GAP the attention map - _, hw1, hw2 = attn.shape - b, _, lh, lw = x0.shape - attn = attn.reshape(b, -1, hw1, hw2) - # Global Average Pool - mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold - # Reshape - mask = ( - mask.reshape(b, *mid_shape) - .unsqueeze(1) - .type(attn.dtype) - ) - # Upsample - mask = F.interpolate(mask, (lh, lw)) - - blurred = gaussian_blur_2d(x0, kernel_size=9, sigma=sigma) - blurred = blurred * mask + x0 * (1 - mask) - return blurred + for fn in model_options.get("sampler_post_cfg_function", []): + args = {"denoised": cfg_result, "cond": cond, "uncond": uncond, "model": model, "uncond_denoised": uncond_pred, "cond_denoised": cond_pred, + "sigma": timestep, "model_options": model_options, "input": x} + cfg_result = fn(args) -def gaussian_blur_2d(img, kernel_size, sigma): - ksize_half = (kernel_size - 1) * 0.5 - - x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) - - pdf = torch.exp(-0.5 * (x / sigma).pow(2)) - - x_kernel = pdf / pdf.sum() - x_kernel = x_kernel.to(device=img.device, dtype=img.dtype) - - kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) - kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) - - padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] - - img = F.pad(img, padding, mode="reflect") - img = F.conv2d(img, kernel2d, groups=img.shape[-3]) - return img + return cfg_result class CFGNoisePredictor(torch.nn.Module): def __init__(self, model): diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index 1ec0c93ac6c..4c609565ac4 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -1,8 +1,12 @@ import torch from torch import einsum +import torch.nn.functional as F +import math + from einops import rearrange, repeat import os from comfy.ldm.modules.attention import optimized_attention, 
_ATTN_PRECISION +import comfy.samplers # from comfy/ldm/modules/attention.py # but modified to return attention scores as well as output @@ -49,7 +53,49 @@ def attention_basic_with_sim(q, k, v, heads, mask=None): ) return (out, sim) -class SagNode: +def create_blur_map(x0, attn, sigma=3.0, threshold=1.0): + # reshape and GAP the attention map + _, hw1, hw2 = attn.shape + b, _, lh, lw = x0.shape + attn = attn.reshape(b, -1, hw1, hw2) + # Global Average Pool + mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold + ratio = round(math.sqrt(lh * lw / hw1)) + mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)] + + # Reshape + mask = ( + mask.reshape(b, *mid_shape) + .unsqueeze(1) + .type(attn.dtype) + ) + # Upsample + mask = F.interpolate(mask, (lh, lw)) + + blurred = gaussian_blur_2d(x0, kernel_size=9, sigma=sigma) + blurred = blurred * mask + x0 * (1 - mask) + return blurred + +def gaussian_blur_2d(img, kernel_size, sigma): + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) + + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + + x_kernel = pdf / pdf.sum() + x_kernel = x_kernel.to(device=img.device, dtype=img.dtype) + + kernel2d = torch.mm(x_kernel[:, None], x_kernel[None, :]) + kernel2d = kernel2d.expand(img.shape[-3], 1, kernel2d.shape[0], kernel2d.shape[1]) + + padding = [kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2] + + img = F.pad(img, padding, mode="reflect") + img = F.conv2d(img, kernel2d, groups=img.shape[-3]) + return img + +class SelfAttentionGuidance: @classmethod def INPUT_TYPES(s): return {"required": { "model": ("MODEL",), @@ -63,15 +109,9 @@ def INPUT_TYPES(s): def patch(self, model, scale, blur_sigma): m = model.clone() - # set extra options on the model - m.model_options["sag"] = True - m.model_options["sag_scale"] = scale - m.model_options["sag_sigma"] = blur_sigma - + attn_scores = None mid_block_shape = None - m.model.get_attn_scores = lambda: attn_scores - m.model.get_mid_block_shape = lambda: mid_block_shape # TODO: make this work properly with chunked batches # currently, we can only save the attn from one UNet call @@ -92,24 +132,41 @@ def attn_and_record(q, k, v, extra_options): else: return optimized_attention(q, k, v, heads=heads) + def post_cfg_function(args): + nonlocal attn_scores + nonlocal mid_block_shape + uncond_attn = attn_scores + + sag_scale = scale + sag_sigma = blur_sigma + sag_threshold = 1.0 + model = args["model"] + uncond_pred = args["uncond_denoised"] + uncond = args["uncond"] + cfg_result = args["denoised"] + sigma = args["sigma"] + model_options = args["model_options"] + x = args["input"] + + # create the adversarially blurred image + degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold) + degraded_noised = degraded + x - uncond_pred + # call into the UNet + (sag, _) = comfy.samplers.calc_cond_uncond_batch(model, uncond, None, degraded_noised, sigma, model_options) + return cfg_result + (degraded - sag) * sag_scale + + m.set_model_sampler_post_cfg_function(post_cfg_function) + # from diffusers: # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch - def set_model_patch_replace(patch, name, key): - to = m.model_options["transformer_options"] - if "patches_replace" not in to: - to["patches_replace"] = {} - if name not in to["patches_replace"]: - to["patches_replace"][name] = {} - to["patches_replace"][name][key] = patch - set_model_patch_replace(attn_and_record, "attn1", ("middle", 0, 0)) - # from diffusers: - # 
unet.mid_block.attentions[0].register_forward_hook() - def forward_hook(m, inp, out): - nonlocal mid_block_shape - mid_block_shape = out[0].shape[-2:] - m.model.diffusion_model.middle_block[0].register_forward_hook(forward_hook) + m.set_model_attn1_replace(attn_and_record, "middle", 0, 0) + return (m, ) NODE_CLASS_MAPPINGS = { - "Self-Attention Guidance": SagNode, + "SelfAttentionGuidance": SelfAttentionGuidance, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "SelfAttentionGuidance": "Self-Attention Guidance", } From 6c5990f7dba2d5d0ad04c7ed5a702b067926cbe2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 13 Dec 2023 20:28:04 -0500 Subject: [PATCH 312/420] Fix cfg being calculated more than once if sampler_cfg_function. --- comfy/samplers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 106e7287656..7dc27528aa4 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -250,10 +250,11 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option uncond_ = uncond cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options) - cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale if "sampler_cfg_function" in model_options: args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} cfg_result = x - model_options["sampler_cfg_function"](args) + else: + cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale for fn in model_options.get("sampler_post_cfg_function", []): args = {"denoised": cfg_result, "cond": cond, "uncond": uncond, "model": model, "uncond_denoised": uncond_pred, "cond_denoised": cond_pred, From 329c57199302f6b9ccfebb86c96e937c386da92f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 14 Dec 2023 11:41:49 -0500 Subject: [PATCH 313/420] Improve code legibility. 
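Most of the diff below is mechanical: the positional tuples built by get_area_and_mult (p[0], p[4], c1[2], ...) become a collections.namedtuple so every field is named at its call site. A minimal standalone sketch of the pattern, with illustrative values rather than the real comfy objects:

    import collections

    CondObj = collections.namedtuple(
        "cond_obj", ["input_x", "mult", "conditioning", "area", "control", "patches"])

    cond = CondObj(input_x=0, mult=1.0, conditioning={}, area=(64, 64, 0, 0),
                   control=None, patches=None)

    # Named access documents itself at every call site...
    assert cond.control is None
    # ...while indexing and unpacking still behave like a plain tuple,
    # so callers can be migrated one at a time.
    assert cond[4] is None

Because namedtuple keeps full tuple semantics, helpers like can_concat_cond can switch to c1.control style access with no behavior change.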
--- comfy/samplers.py | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 7dc27528aa4..39bc3774a4c 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -2,6 +2,7 @@ from .extra_samplers import uni_pc import torch import enum +import collections from comfy import model_management import math from comfy import model_base @@ -61,9 +62,7 @@ def get_area_and_mult(conds, x_in, timestep_in): for c in model_conds: conditioning[c] = model_conds[c].process_cond(batch_size=x_in.shape[0], device=x_in.device, area=area) - control = None - if 'control' in conds: - control = conds['control'] + control = conds.get('control', None) patches = None if 'gligen' in conds: @@ -78,7 +77,8 @@ def get_area_and_mult(conds, x_in, timestep_in): patches['middle_patch'] = [gligen_patch] - return (input_x, mult, conditioning, area, control, patches) + cond_obj = collections.namedtuple('cond_obj', ['input_x', 'mult', 'conditioning', 'area', 'control', 'patches']) + return cond_obj(input_x, mult, conditioning, area, control, patches) def cond_equal_size(c1, c2): if c1 is c2: @@ -91,24 +91,24 @@ def cond_equal_size(c1, c2): return True def can_concat_cond(c1, c2): - if c1[0].shape != c2[0].shape: + if c1.input_x.shape != c2.input_x.shape: return False - #control - if (c1[4] is None) != (c2[4] is None): - return False - if c1[4] is not None: - if c1[4] is not c2[4]: + def objects_concatable(obj1, obj2): + if (obj1 is None) != (obj2 is None): return False + if obj1 is not None: + if obj1 is not obj2: + return False + return True - #patches - if (c1[5] is None) != (c2[5] is None): + if not objects_concatable(c1.control, c2.control): + return False + + if not objects_concatable(c1.patches, c2.patches): return False - if (c1[5] is not None): - if c1[5] is not c2[5]: - return False - return cond_equal_size(c1[2], c2[2]) + return cond_equal_size(c1.conditioning, c2.conditioning) def cond_cat(c_list): c_crossattn = [] @@ -184,13 +184,13 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): for x in to_batch: o = to_run.pop(x) p = o[0] - input_x += [p[0]] - mult += [p[1]] - c += [p[2]] - area += [p[3]] - cond_or_uncond += [o[1]] - control = p[4] - patches = p[5] + input_x.append(p.input_x) + mult.append(p.mult) + c.append(p.conditioning) + area.append(p.area) + cond_or_uncond.append(o[1]) + control = p.control + patches = p.patches batch_chunks = len(cond_or_uncond) input_x = torch.cat(input_x) From b12b48e170ccff156dc6ec11242bb6af7d8437fd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 14 Dec 2023 20:11:46 -0500 Subject: [PATCH 314/420] cleanup. --- comfy_extras/nodes_sag.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index 4c609565ac4..0bcda84f08a 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -111,7 +111,6 @@ def patch(self, model, scale, blur_sigma): m = model.clone() attn_scores = None - mid_block_shape = None # TODO: make this work properly with chunked batches # currently, we can only save the attn from one UNet call @@ -134,7 +133,6 @@ def attn_and_record(q, k, v, extra_options): def post_cfg_function(args): nonlocal attn_scores - nonlocal mid_block_shape uncond_attn = attn_scores sag_scale = scale From a5056cfb1f41f1f9e6fcd523ef8091e6e7cd6e3b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 15 Dec 2023 01:28:16 -0500 Subject: [PATCH 315/420] Remove useless code. 
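The deleted autocast wrapper only existed to force an fp32 similarity matmul; upcasting the operands does that on its own, at least outside of any active torch.autocast region (taken as given here, since this attention path manages dtypes explicitly rather than through autocast). A quick sketch of the equivalence:

    import torch

    q = torch.randn(2, 16, 64, dtype=torch.float16)
    k = torch.randn(2, 16, 64, dtype=torch.float16)

    # einsum on fp32 inputs accumulates and returns fp32 by itself,
    # so the enabled=False autocast context added nothing.
    sim = torch.einsum('b i d, b j d -> b i j', q.float(), k.float())
    assert sim.dtype == torch.float32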
--- comfy/ldm/modules/attention.py | 4 +--- comfy_extras/nodes_sag.py | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 8d86aa53d2e..3e12886b07f 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -104,9 +104,7 @@ def attention_basic(q, k, v, heads, mask=None): # force cast to fp32 to avoid overflowing if _ATTN_PRECISION =="fp32": - with torch.autocast(enabled=False, device_type = 'cuda'): - q, k = q.float(), k.float() - sim = einsum('b i d, b j d -> b i j', q, k) * scale + sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale else: sim = einsum('b i d, b j d -> b i j', q, k) * scale diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index 0bcda84f08a..7e293ef63e4 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -27,9 +27,7 @@ def attention_basic_with_sim(q, k, v, heads, mask=None): # force cast to fp32 to avoid overflowing if _ATTN_PRECISION =="fp32": - with torch.autocast(enabled=False, device_type = 'cuda'): - q, k = q.float(), k.float() - sim = einsum('b i d, b j d -> b i j', q, k) * scale + sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale else: sim = einsum('b i d, b j d -> b i j', q, k) * scale From 574363a8a69cb48db71c96d03fe056d56853f4f6 Mon Sep 17 00:00:00 2001 From: Hari Date: Sat, 16 Dec 2023 00:28:16 +0530 Subject: [PATCH 316/420] Implement Perp-Neg --- comfy/samplers.py | 3 +- comfy_extras/nodes_perpneg.py | 58 +++++++++++++++++++++++++++++++++++ nodes.py | 1 + 3 files changed, 61 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_perpneg.py diff --git a/comfy/samplers.py b/comfy/samplers.py index 39bc3774a4c..35c9ccf0592 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -251,7 +251,8 @@ def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_option cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options) if "sampler_cfg_function" in model_options: - args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep} + args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep, + "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options} cfg_result = x - model_options["sampler_cfg_function"](args) else: cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py new file mode 100644 index 00000000000..36f2eb01a51 --- /dev/null +++ b/comfy_extras/nodes_perpneg.py @@ -0,0 +1,58 @@ +import torch +import comfy.model_management +import comfy.sample +import comfy.samplers +import comfy.utils + + +class PerpNeg: + @classmethod + def INPUT_TYPES(s): + return {"required": {"model": ("MODEL", ), + "clip": ("CLIP", ), + "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), + }} + RETURN_TYPES = ("MODEL",) + FUNCTION = "patch" + + CATEGORY = "_for_testing" + + def patch(self, model, clip, neg_scale): + m = model.clone() + + tokens = clip.tokenize("") + nocond, nocond_pooled = clip.encode_from_tokens(tokens, return_pooled=True) + nocond = [[nocond, {"pooled_output": nocond_pooled}]] + nocond = comfy.sample.convert_cond(nocond) + + def cfg_function(args): + model = args["model"] + noise_pred_pos = args["cond_denoised"] + 
noise_pred_neg = args["uncond_denoised"] + cond_scale = args["cond_scale"] + x = args["input"] + sigma = args["sigma"] + model_options = args["model_options"] + + (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond, None, x, sigma, model_options) + + pos = noise_pred_pos - noise_pred_nocond + neg = noise_pred_neg - noise_pred_nocond + perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg + perp_neg = perp * neg_scale + cfg_result = noise_pred_nocond + cond_scale*(pos - perp_neg) + cfg_result = x - cfg_result + return cfg_result + + m.set_model_sampler_cfg_function(cfg_function) + + return (m, ) + + +NODE_CLASS_MAPPINGS = { + "PerpNeg": PerpNeg, +} + +NODE_DISPLAY_NAME_MAPPINGS = { + "PerpNeg": "Perp-Neg", +} diff --git a/nodes.py b/nodes.py index 3d24750cbfa..3031b10aad2 100644 --- a/nodes.py +++ b/nodes.py @@ -1868,6 +1868,7 @@ def init_custom_nodes(): "nodes_images.py", "nodes_video_model.py", "nodes_sag.py", + "nodes_perpneg.py", ] for node_file in extras_files: From 9cad2f06ff93e3ac512f7f008c11026530900b51 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 15 Dec 2023 14:40:57 -0500 Subject: [PATCH 317/420] Make perp neg take a conditioning input instead of a CLIP one. --- comfy_extras/nodes_perpneg.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py index 36f2eb01a51..0c5ccb77a81 100644 --- a/comfy_extras/nodes_perpneg.py +++ b/comfy_extras/nodes_perpneg.py @@ -9,7 +9,7 @@ class PerpNeg: @classmethod def INPUT_TYPES(s): return {"required": {"model": ("MODEL", ), - "clip": ("CLIP", ), + "empty_conditioning": ("CONDITIONING", ), "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}), }} RETURN_TYPES = ("MODEL",) @@ -17,13 +17,9 @@ def INPUT_TYPES(s): CATEGORY = "_for_testing" - def patch(self, model, clip, neg_scale): + def patch(self, model, empty_conditioning, neg_scale): m = model.clone() - - tokens = clip.tokenize("") - nocond, nocond_pooled = clip.encode_from_tokens(tokens, return_pooled=True) - nocond = [[nocond, {"pooled_output": nocond_pooled}]] - nocond = comfy.sample.convert_cond(nocond) + nocond = comfy.sample.convert_cond(empty_conditioning) def cfg_function(args): model = args["model"] @@ -33,9 +29,9 @@ def cfg_function(args): x = args["input"] sigma = args["sigma"] model_options = args["model_options"] - + (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond, None, x, sigma, model_options) - + pos = noise_pred_pos - noise_pred_nocond neg = noise_pred_neg - noise_pred_nocond perp = ((torch.mul(pos, neg).sum())/(torch.norm(neg)**2)) * neg From 014c8bf2f227eea118eb2f232962647289314853 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 15 Dec 2023 15:26:12 -0500 Subject: [PATCH 318/420] Refactor LCM to support more model types. 
--- comfy_extras/nodes_model_advanced.py | 46 +++++----------------------- 1 file changed, 8 insertions(+), 38 deletions(-) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index efcdf1932e4..83ef73c701a 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -17,41 +17,19 @@ def calculate_denoised(self, sigma, model_output, model_input): return c_out * x0 + c_skip * model_input -class ModelSamplingDiscreteDistilled(torch.nn.Module): +class ModelSamplingDiscreteDistilled(comfy.model_sampling.ModelSamplingDiscrete): original_timesteps = 50 - def __init__(self): - super().__init__() - self.sigma_data = 1.0 - timesteps = 1000 - beta_start = 0.00085 - beta_end = 0.012 + def __init__(self, model_config=None): + super().__init__(model_config) - betas = torch.linspace(beta_start**0.5, beta_end**0.5, timesteps, dtype=torch.float32) ** 2 - alphas = 1.0 - betas - alphas_cumprod = torch.cumprod(alphas, dim=0) + self.skip_steps = self.num_timesteps // self.original_timesteps - self.skip_steps = timesteps // self.original_timesteps - - - alphas_cumprod_valid = torch.zeros((self.original_timesteps), dtype=torch.float32) + sigmas_valid = torch.zeros((self.original_timesteps), dtype=torch.float32) for x in range(self.original_timesteps): - alphas_cumprod_valid[self.original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps] - - sigmas = ((1 - alphas_cumprod_valid) / alphas_cumprod_valid) ** 0.5 - self.set_sigmas(sigmas) - - def set_sigmas(self, sigmas): - self.register_buffer('sigmas', sigmas) - self.register_buffer('log_sigmas', sigmas.log()) + sigmas_valid[self.original_timesteps - 1 - x] = self.sigmas[self.num_timesteps - 1 - x * self.skip_steps] - @property - def sigma_min(self): - return self.sigmas[0] - - @property - def sigma_max(self): - return self.sigmas[-1] + self.set_sigmas(sigmas_valid) def timestep(self, sigma): log_sigma = sigma.log() @@ -66,14 +44,6 @@ def sigma(self, timestep): log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx] return log_sigma.exp().to(timestep.device) - def percent_to_sigma(self, percent): - if percent <= 0.0: - return 999999999.9 - if percent >= 1.0: - return 0.0 - percent = 1.0 - percent - return self.sigma(torch.tensor(percent * 999.0)).item() - def rescale_zero_terminal_snr_sigmas(sigmas): alphas_cumprod = 1 / ((sigmas * sigmas) + 1) @@ -154,7 +124,7 @@ def patch(self, model, sampling, sigma_max, sigma_min): class ModelSamplingAdvanced(comfy.model_sampling.ModelSamplingContinuousEDM, sampling_type): pass - model_sampling = ModelSamplingAdvanced() + model_sampling = ModelSamplingAdvanced(model.model.model_config) model_sampling.set_sigma_range(sigma_min, sigma_max) m.add_object_patch("model_sampling", model_sampling) return (m, ) From adc40e3d7bc612e81874cf9f5738bda0e17ce0a3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 15 Dec 2023 15:46:23 -0500 Subject: [PATCH 319/420] Forgot this. 
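(This finishes the previous refactor: the sampling classes are now constructed with the model config, so this remaining ModelSamplingAdvanced call site must pass model.model.model_config as well.)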
--- comfy_extras/nodes_model_advanced.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_model_advanced.py b/comfy_extras/nodes_model_advanced.py index 83ef73c701a..541ce8fa5cc 100644 --- a/comfy_extras/nodes_model_advanced.py +++ b/comfy_extras/nodes_model_advanced.py @@ -92,7 +92,7 @@ def patch(self, model, sampling, zsnr): class ModelSamplingAdvanced(sampling_base, sampling_type): pass - model_sampling = ModelSamplingAdvanced() + model_sampling = ModelSamplingAdvanced(model.model.model_config) if zsnr: model_sampling.set_sigmas(rescale_zero_terminal_snr_sigmas(model_sampling.sigmas)) From 719fa0866fcd7744de3bf5ffd9ddd076f7c36b98 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 15 Dec 2023 18:53:08 -0500 Subject: [PATCH 320/420] Set clip vision model in eval mode so it works without inference mode. --- comfy/clip_vision.py | 1 + 1 file changed, 1 insertion(+) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index ba8a3a8d569..85b017e0c7e 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -39,6 +39,7 @@ def __init__(self, json_config): self.dtype = torch.float16 self.model = comfy.clip_model.CLIPVisionModelProjection(config, self.dtype, offload_device, comfy.ops.disable_weight_init) + self.model.eval() self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) def load_sd(self, sd): From 6596654d4792dae97831d429fcb095376f243a7c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 16 Dec 2023 01:21:00 -0500 Subject: [PATCH 321/420] Add a LatentBatch node. --- comfy_extras/nodes_latent.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index cedf39d6346..2eefc4c555d 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -3,9 +3,7 @@ def reshape_latent_to(target_shape, latent): if latent.shape[1:] != target_shape[1:]: - latent.movedim(1, -1) latent = comfy.utils.common_upscale(latent, target_shape[3], target_shape[2], "bilinear", "center") - latent.movedim(-1, 1) return comfy.utils.repeat_to_batch_size(latent, target_shape[0]) @@ -102,9 +100,32 @@ def op(self, samples1, samples2, ratio): samples_out["samples"] = st * (m1 * ratio + m2 * (1.0 - ratio)) return (samples_out,) +class LatentBatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples1": ("LATENT",), "samples2": ("LATENT",)}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "batch" + + CATEGORY = "latent/batch" + + def batch(self, samples1, samples2): + samples_out = samples1.copy() + s1 = samples1["samples"] + s2 = samples2["samples"] + + if s1.shape[1:] != s2.shape[1:]: + s2 = comfy.utils.common_upscale(s2, s1.shape[3], s1.shape[2], "bilinear", "center") + s = torch.cat((s1, s2), dim=0) + samples_out["samples"] = s + samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])]) + return (samples_out,) + NODE_CLASS_MAPPINGS = { "LatentAdd": LatentAdd, "LatentSubtract": LatentSubtract, "LatentMultiply": LatentMultiply, "LatentInterpolate": LatentInterpolate, + "LatentBatch": LatentBatch, } From 172984db0175845c1a16bc3100fed0e46b42f604 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 16 Dec 2023 01:29:57 -0500 Subject: [PATCH 322/420] Fix SAG not working on certain resolutions. 
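Why ceil instead of round: the attention map's spatial size comes from strided downsampling, so hw1 is built from ceil(dim/ratio) terms and sqrt(lh * lw / hw1) can land below the midpoint even when the true ratio is an integer. Rounding then produces a mid_shape whose element count no longer matches hw1, and the reshape that follows fails. A worked example with illustrative numbers:

    import math

    lh = lw = 65                                  # odd-sized latent
    hw1 = math.ceil(lh / 8) * math.ceil(lw / 8)   # 9 * 9 = 81 attention entries at ratio 8

    estimate = math.sqrt(lh * lw / hw1)           # ~7.22 although the true ratio is 8

    assert round(estimate) == 7                   # old: mid_shape 10x10 = 100 != 81, broken reshape
    assert math.ceil(estimate) == 8               # fixed: mid_shape 9x9 = 81, matches hw1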
--- comfy_extras/nodes_sag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index 7e293ef63e4..fea673d6c26 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -58,7 +58,7 @@ def create_blur_map(x0, attn, sigma=3.0, threshold=1.0): attn = attn.reshape(b, -1, hw1, hw2) # Global Average Pool mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold - ratio = round(math.sqrt(lh * lw / hw1)) + ratio = math.ceil(math.sqrt(lh * lw / hw1)) mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)] # Reshape From 574efd3782c022fd00f55745d784207f6d318b15 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 16 Dec 2023 02:30:16 -0500 Subject: [PATCH 323/420] Fix perpneg not working on SDXL. --- comfy_extras/nodes_perpneg.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_perpneg.py b/comfy_extras/nodes_perpneg.py index 0c5ccb77a81..45e4d418f4f 100644 --- a/comfy_extras/nodes_perpneg.py +++ b/comfy_extras/nodes_perpneg.py @@ -29,8 +29,9 @@ def cfg_function(args): x = args["input"] sigma = args["sigma"] model_options = args["model_options"] + nocond_processed = comfy.samplers.encode_model_conds(model.extra_conds, nocond, x, x.device, "negative") - (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond, None, x, sigma, model_options) + (noise_pred_nocond, _) = comfy.samplers.calc_cond_uncond_batch(model, nocond_processed, None, x, sigma, model_options) pos = noise_pred_pos - noise_pred_nocond neg = noise_pred_neg - noise_pred_nocond From 13e6d5366e87ae76f517e1d79349e51fe92087b2 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 16 Dec 2023 02:47:26 -0500 Subject: [PATCH 324/420] Switch clip vision to manual cast. Make it use the same dtype as the text encoder. 
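With manual_cast (the comfy/ops.py mechanism shown in patch 343 later in this series), each layer casts its own weights to the incoming activation's dtype and device at forward time, so no torch.autocast region is needed and the behavior does not depend on backend-specific autocast support. The pattern reduced to a single layer, as a sketch rather than the actual comfy.ops code:

    import torch

    class ManualCastLinear(torch.nn.Linear):
        def forward(self, x):
            # Cast parameters to the input's dtype/device on every call
            # instead of relying on an enclosing torch.autocast region.
            weight = self.weight.to(device=x.device, dtype=x.dtype)
            bias = self.bias.to(device=x.device, dtype=x.dtype) if self.bias is not None else None
            return torch.nn.functional.linear(x, weight, bias)

    layer = ManualCastLinear(8, 4).half()   # fp16 storage
    out = layer(torch.randn(2, 8))          # fp32 input -> fp32 compute
    assert out.dtype == torch.float32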
--- comfy/clip_vision.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 85b017e0c7e..a95616f1d04 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -34,11 +34,8 @@ def __init__(self, json_config): self.load_device = comfy.model_management.text_encoder_device() offload_device = comfy.model_management.text_encoder_offload_device() - self.dtype = torch.float32 - if comfy.model_management.should_use_fp16(self.load_device, prioritize_performance=False): - self.dtype = torch.float16 - - self.model = comfy.clip_model.CLIPVisionModelProjection(config, self.dtype, offload_device, comfy.ops.disable_weight_init) + self.dtype = comfy.model_management.text_encoder_dtype(self.load_device) + self.model = comfy.clip_model.CLIPVisionModelProjection(config, self.dtype, offload_device, comfy.ops.manual_cast) self.model.eval() self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) @@ -47,15 +44,8 @@ def load_sd(self, sd): def encode_image(self, image): comfy.model_management.load_model_gpu(self.patcher) - pixel_values = clip_preprocess(image.to(self.load_device)) - - if self.dtype != torch.float32: - precision_scope = torch.autocast - else: - precision_scope = lambda a, b: contextlib.nullcontext(a) - - with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32): - out = self.model(pixel_values=pixel_values, intermediate_output=-2) + pixel_values = clip_preprocess(image.to(self.load_device)).float() + out = self.model(pixel_values=pixel_values, intermediate_output=-2) outputs = Output() outputs["last_hidden_state"] = out[0].to(comfy.model_management.intermediate_device()) From e45d920ae392c608b9cfcb1f863cfc8688ebb518 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 16 Dec 2023 03:06:10 -0500 Subject: [PATCH 325/420] Don't resize clip vision image when the size is already good. --- comfy/clip_vision.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index a95616f1d04..4564fcfb2a0 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -19,11 +19,13 @@ def __setitem__(self, key, item): def clip_preprocess(image, size=224): mean = torch.tensor([ 0.48145466,0.4578275,0.40821073], device=image.device, dtype=image.dtype) std = torch.tensor([0.26862954,0.26130258,0.27577711], device=image.device, dtype=image.dtype) - scale = (size / min(image.shape[1], image.shape[2])) - image = torch.nn.functional.interpolate(image.movedim(-1, 1), size=(round(scale * image.shape[1]), round(scale * image.shape[2])), mode="bicubic", antialias=True) - h = (image.shape[2] - size)//2 - w = (image.shape[3] - size)//2 - image = image[:,:,h:h+size,w:w+size] + image = image.movedim(-1, 1) + if not (image.shape[2] == size and image.shape[3] == size): + scale = (size / min(image.shape[2], image.shape[3])) + image = torch.nn.functional.interpolate(image, size=(round(scale * image.shape[2]), round(scale * image.shape[3])), mode="bicubic", antialias=True) + h = (image.shape[2] - size)//2 + w = (image.shape[3] - size)//2 + image = image[:,:,h:h+size,w:w+size] image = torch.clip((255. 
* image), 0, 255).round() / 255.0 return (image - mean.view([3,1,1])) / std.view([3,1,1]) From 6453dc1ca2d98d89af7cf312bb48d1e3fd2ca27f Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 16 Dec 2023 14:16:12 +0000 Subject: [PATCH 326/420] Fix name counter preventing more than 3 of the same node Fix linked widget offset when populating values --- tests-ui/tests/groupNode.test.js | 32 ++++++++++++++++++++++++++++++++ web/extensions/core/groupNode.js | 9 +++++---- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/tests-ui/tests/groupNode.test.js b/tests-ui/tests/groupNode.test.js index 625890a0909..e6ebedd9150 100644 --- a/tests-ui/tests/groupNode.test.js +++ b/tests-ui/tests/groupNode.test.js @@ -970,4 +970,36 @@ describe("group node", () => { }); }); }); + test("converted inputs with linked widgets map values correctly on creation", async () => { + const { ez, graph, app } = await start(); + const k1 = ez.KSampler(); + const k2 = ez.KSampler(); + k1.widgets.seed.convertToInput(); + k2.widgets.seed.convertToInput(); + + const rr = ez.Reroute(); + rr.outputs[0].connectTo(k1.inputs.seed); + rr.outputs[0].connectTo(k2.inputs.seed); + + const group = await convertToGroup(app, graph, "test", [k1, k2, rr]); + expect(group.widgets.steps.value).toBe(20); + expect(group.widgets.cfg.value).toBe(8); + expect(group.widgets.scheduler.value).toBe("normal"); + expect(group.widgets["KSampler steps"].value).toBe(20); + expect(group.widgets["KSampler cfg"].value).toBe(8); + expect(group.widgets["KSampler scheduler"].value).toBe("normal"); + }); + test("allow multiple of the same node type to be added", async () => { + const { ez, graph, app } = await start(); + const nodes = [...Array(10)].map(() => ez.ImageScaleBy()); + const group = await convertToGroup(app, graph, "test", nodes); + expect(group.inputs.length).toBe(10); + expect(group.outputs.length).toBe(10); + expect(group.widgets.length).toBe(20); + expect(group.widgets.map((w) => w.widget.name)).toStrictEqual( + [...Array(10)] + .map((_, i) => `${i > 0 ? "ImageScaleBy " : ""}${i > 1 ? i + " " : ""}`) + .flatMap((p) => [`${p}upscale_method`, `${p}scale_by`]) + ); + }); }); diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index dc962ac2465..4cf1f7621b9 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -331,16 +331,17 @@ export class GroupNodeConfig { getInputConfig(node, inputName, seenInputs, config, extra) { let name = node.inputs?.find((inp) => inp.name === inputName)?.label ?? inputName; + let key = name; let prefix = ""; // Special handling for primitive to include the title if it is set rather than just "value" if ((node.type === "PrimitiveNode" && node.title) || name in seenInputs) { prefix = `${node.title ?? node.type} `; - name = `${prefix}${inputName}`; + key = name = `${prefix}${inputName}`; if (name in seenInputs) { name = `${prefix}${seenInputs[name]} ${inputName}`; } } - seenInputs[name] = (seenInputs[name] ?? 1) + 1; + seenInputs[key] = (seenInputs[key] ?? 
1) + 1; if (inputName === "seed" || inputName === "noise_seed") { if (!extra) extra = {}; @@ -1010,10 +1011,10 @@ export class GroupNodeHandler { const newName = map[oldName]; const widgetIndex = this.node.widgets.findIndex((w) => w.name === newName); const mainWidget = this.node.widgets[widgetIndex]; - if (this.populatePrimitive(node, nodeId, oldName, i, linkedShift)) { + if (this.populatePrimitive(node, nodeId, oldName, i, linkedShift) || widgetIndex === -1) { // Find the inner widget and shift by the number of linked widgets as they will have been removed too const innerWidget = this.innerNodes[nodeId].widgets?.find((w) => w.name === oldName); - linkedShift += innerWidget.linkedWidgets?.length ?? 0; + linkedShift += innerWidget?.linkedWidgets?.length ?? 0; } if (widgetIndex === -1) { continue; From a036b940752fff830fde4108cd243a35df2fa1ee Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 17 Dec 2023 02:37:22 -0500 Subject: [PATCH 327/420] Move SaveAnimated nodes to image->animation. --- comfy_extras/nodes_images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py index 5ad2235a523..aa80f5269a3 100644 --- a/comfy_extras/nodes_images.py +++ b/comfy_extras/nodes_images.py @@ -74,7 +74,7 @@ def INPUT_TYPES(s): OUTPUT_NODE = True - CATEGORY = "_for_testing" + CATEGORY = "image/animation" def save_images(self, images, fps, filename_prefix, lossless, quality, method, num_frames=0, prompt=None, extra_pnginfo=None): method = self.methods.get(method) @@ -136,7 +136,7 @@ def INPUT_TYPES(s): OUTPUT_NODE = True - CATEGORY = "_for_testing" + CATEGORY = "image/animation" def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None): filename_prefix += self.prefix_append From 2f9d6a97ec7e3cb25beb13a320da8ec4573355d3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 17 Dec 2023 16:59:21 -0500 Subject: [PATCH 328/420] Add --deterministic option to make pytorch use deterministic algorithms. --- comfy/cli_args.py | 2 +- comfy/model_management.py | 4 ++++ main.py | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index d9c8668f470..8de0adb53ee 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -102,7 +102,7 @@ class LatentPreviewMethod(enum.Enum): parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to agressively offload to regular ram instead of keeping models in vram when it can.") - +parser.add_argument("--deterministic", action="store_true", help="Make pytorch use slower deterministic algorithms when it can. 
Note that this might not make images deterministic in all cases.") parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.") parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.") diff --git a/comfy/model_management.py b/comfy/model_management.py index b6a9471bfa1..23f39c98510 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -28,6 +28,10 @@ class CPUState(Enum): lowvram_available = True xpu_available = False +if args.deterministic: + print("Using deterministic algorithms for pytorch") + torch.use_deterministic_algorithms(True, warn_only=True) + directml_enabled = False if args.directml is not None: import torch_directml diff --git a/main.py b/main.py index 1f9c5f443c3..f6aeceed2af 100644 --- a/main.py +++ b/main.py @@ -64,6 +64,10 @@ def execute_script(script_path): os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) print("Set cuda device to:", args.cuda_device) + if args.deterministic: + if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ":4096:8" + import cuda_malloc import comfy.utils From 2258f851593fcb4bf34d22dddd3b7cb711db91ec Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 18 Dec 2023 03:18:40 -0500 Subject: [PATCH 329/420] Support stable zero 123 model. To use it use the ImageOnlyCheckpointLoader to load the checkpoint and the new Stable_Zero123 node. --- comfy/model_base.py | 30 ++++++++++++++++++ comfy/sample.py | 3 +- comfy/supported_models.py | 29 ++++++++++++++++- comfy_extras/nodes_stable3d.py | 58 ++++++++++++++++++++++++++++++++++ nodes.py | 1 + 5 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 comfy_extras/nodes_stable3d.py diff --git a/comfy/model_base.py b/comfy/model_base.py index a7582b330d9..c80848b27de 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -328,3 +328,33 @@ def extra_conds(self, **kwargs): out['image_only_indicator'] = comfy.conds.CONDConstant(torch.zeros((1,), device=device)) out['num_video_frames'] = comfy.conds.CONDConstant(noise.shape[0]) return out + +class Stable_Zero123(BaseModel): + def __init__(self, model_config, model_type=ModelType.EPS, device=None, cc_projection_weight=None, cc_projection_bias=None): + super().__init__(model_config, model_type, device=device) + self.cc_projection = comfy.ops.manual_cast.Linear(cc_projection_weight.shape[1], cc_projection_weight.shape[0], dtype=self.get_dtype(), device=device) + self.cc_projection.weight.copy_(cc_projection_weight) + self.cc_projection.bias.copy_(cc_projection_bias) + + def extra_conds(self, **kwargs): + out = {} + + latent_image = kwargs.get("concat_latent_image", None) + noise = kwargs.get("noise", None) + + if latent_image is None: + latent_image = torch.zeros_like(noise) + + if latent_image.shape[1:] != noise.shape[1:]: + latent_image = utils.common_upscale(latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center") + + latent_image = utils.resize_to_batch_size(latent_image, noise.shape[0]) + + out['c_concat'] = comfy.conds.CONDNoiseShape(latent_image) + + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + if cross_attn.shape[-1] != 768: + cross_attn = self.cc_projection(cross_attn) + out['c_crossattn'] = comfy.conds.CONDCrossAttn(cross_attn) + return out diff --git a/comfy/sample.py b/comfy/sample.py index eadd6dcc864..4b0d15c49d1 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -47,7 +47,8 @@ def convert_cond(cond): temp = c[1].copy() model_conds = 
temp.get("model_conds", {}) if c[0] is not None: - model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) + model_conds["c_crossattn"] = comfy.conds.CONDCrossAttn(c[0]) #TODO: remove + temp["cross_attn"] = c[0] temp["model_conds"] = model_conds out.append(temp) return out diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 2f2dee871a2..251bf6ace86 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -252,5 +252,32 @@ def get_model(self, state_dict, prefix="", device=None): def clip_target(self): return None -models = [SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega] +class Stable_Zero123(supported_models_base.BASE): + unet_config = { + "context_dim": 768, + "model_channels": 320, + "use_linear_in_transformer": False, + "adm_in_channels": None, + "use_temporal_attention": False, + "in_channels": 8, + } + + unet_extra_config = { + "num_heads": 8, + "num_head_channels": -1, + } + + clip_vision_prefix = "cond_stage_model.model.visual." + + latent_format = latent_formats.SD15 + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.Stable_Zero123(self, device=device, cc_projection_weight=state_dict["cc_projection.weight"], cc_projection_bias=state_dict["cc_projection.bias"]) + return out + + def clip_target(self): + return None + + +models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_stable3d.py b/comfy_extras/nodes_stable3d.py new file mode 100644 index 00000000000..fa64d9246c8 --- /dev/null +++ b/comfy_extras/nodes_stable3d.py @@ -0,0 +1,58 @@ +import torch +import nodes +import comfy.utils + +def camera_embeddings(elevation, azimuth): + elevation = torch.as_tensor([elevation]) + azimuth = torch.as_tensor([azimuth]) + embeddings = torch.stack( + [ + torch.deg2rad( + (90 - elevation) - (90) + ), # Zero123 polar is 90-elevation + torch.sin(torch.deg2rad(azimuth)), + torch.cos(torch.deg2rad(azimuth)), + torch.deg2rad( + 90 - torch.full_like(elevation, 0) + ), + ], dim=-1).unsqueeze(1) + + return embeddings + + +class Zero123_Conditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + }} + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "encode" + + CATEGORY = "conditioning/3d_models" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + cam_embeds = camera_embeddings(elevation, azimuth) + cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) + + positive = [[cond, {"concat_latent_image": t}]] + negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]] + latent = 
torch.zeros([batch_size, 4, height // 8, width // 8]) + return (positive, negative, {"samples":latent}) + +NODE_CLASS_MAPPINGS = { + "Zero123_Conditioning": Zero123_Conditioning, +} diff --git a/nodes.py b/nodes.py index 3031b10aad2..7ed7a8e4a15 100644 --- a/nodes.py +++ b/nodes.py @@ -1869,6 +1869,7 @@ def init_custom_nodes(): "nodes_video_model.py", "nodes_sag.py", "nodes_perpneg.py", + "nodes_stable3d.py", ] for node_file in extras_files: From d2f322902cf33c7235e7982900b5a88d55d5ecd1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 18 Dec 2023 03:59:50 -0500 Subject: [PATCH 330/420] Fix wrong Stable Zero123 node name. --- comfy_extras/nodes_stable3d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_stable3d.py b/comfy_extras/nodes_stable3d.py index fa64d9246c8..c6791d8de2a 100644 --- a/comfy_extras/nodes_stable3d.py +++ b/comfy_extras/nodes_stable3d.py @@ -20,7 +20,7 @@ def camera_embeddings(elevation, azimuth): return embeddings -class Zero123_Conditioning: +class StableZero123_Conditioning: @classmethod def INPUT_TYPES(s): return {"required": { "clip_vision": ("CLIP_VISION",), @@ -54,5 +54,5 @@ def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevat return (positive, negative, {"samples":latent}) NODE_CLASS_MAPPINGS = { - "Zero123_Conditioning": Zero123_Conditioning, + "StableZero123_Conditioning": StableZero123_Conditioning, } From 8cf1daa108400f2e29188fa0b4404d6ebc83b864 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 18 Dec 2023 12:54:23 -0500 Subject: [PATCH 331/420] Fix SDXL area composition sometimes not using the right pooled output. --- comfy/model_base.py | 10 ++++++++++ comfy/samplers.py | 7 ++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index c80848b27de..f2a6f9841f8 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -126,9 +126,15 @@ def blank_inpaint_image_like(latent_image): cond_concat.append(blank_inpaint_image_like(noise)) data = torch.cat(cond_concat, dim=1) out['c_concat'] = comfy.conds.CONDNoiseShape(data) + adm = self.encode_adm(**kwargs) if adm is not None: out['y'] = comfy.conds.CONDRegular(adm) + + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDCrossAttn(cross_attn) + return out def load_model_weights(self, sd, unet_prefix=""): @@ -322,6 +328,10 @@ def extra_conds(self, **kwargs): out['c_concat'] = comfy.conds.CONDNoiseShape(latent_image) + cross_attn = kwargs.get("cross_attn", None) + if cross_attn is not None: + out['c_crossattn'] = comfy.conds.CONDCrossAttn(cross_attn) + if "time_conditioning" in kwargs: out["time_context"] = comfy.conds.CONDCrossAttn(kwargs["time_conditioning"]) diff --git a/comfy/samplers.py b/comfy/samplers.py index 35c9ccf0592..18bd75ef15c 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -599,6 +599,10 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model calculate_start_end_timesteps(model, negative) calculate_start_end_timesteps(model, positive) + if hasattr(model, 'extra_conds'): + positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask) + negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask) + #make sure each cond area has an opposite one with the same area for c in positive: 
create_cond_with_same_area_if_none(negative, c) @@ -613,9 +617,6 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model if latent_image is not None: latent_image = model.process_latent_in(latent_image) - if hasattr(model, 'extra_conds'): - positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask) - negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask) extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} From 571ea8cdcc2d1bf4fa7f398dad68415dacfff02f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 18 Dec 2023 17:03:32 -0500 Subject: [PATCH 332/420] Fix SAG not working with cfg 1.0 --- comfy/model_patcher.py | 8 ++++++-- comfy/samplers.py | 2 +- comfy_extras/nodes_sag.py | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index e0acdc961ee..6acb2d647c0 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -55,14 +55,18 @@ def is_clone(self, other): def memory_required(self, input_shape): return self.model.memory_required(input_shape=input_shape) - def set_model_sampler_cfg_function(self, sampler_cfg_function): + def set_model_sampler_cfg_function(self, sampler_cfg_function, disable_cfg1_optimization=False): if len(inspect.signature(sampler_cfg_function).parameters) == 3: self.model_options["sampler_cfg_function"] = lambda args: sampler_cfg_function(args["cond"], args["uncond"], args["cond_scale"]) #Old way else: self.model_options["sampler_cfg_function"] = sampler_cfg_function + if disable_cfg1_optimization: + self.model_options["disable_cfg1_optimization"] = True - def set_model_sampler_post_cfg_function(self, post_cfg_function): + def set_model_sampler_post_cfg_function(self, post_cfg_function, disable_cfg1_optimization=False): self.model_options["sampler_post_cfg_function"] = self.model_options.get("sampler_post_cfg_function", []) + [post_cfg_function] + if disable_cfg1_optimization: + self.model_options["disable_cfg1_optimization"] = True def set_model_unet_function_wrapper(self, unet_wrapper_function): self.model_options["model_function_wrapper"] = unet_wrapper_function diff --git a/comfy/samplers.py b/comfy/samplers.py index 18bd75ef15c..47f3477876b 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -244,7 +244,7 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options): #The main sampling function shared by all the samplers #Returns denoised def sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options={}, seed=None): - if math.isclose(cond_scale, 1.0): + if math.isclose(cond_scale, 1.0) and model_options.get("disable_cfg1_optimization", False) == False: uncond_ = None else: uncond_ = uncond diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index fea673d6c26..450ac3eeacd 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -151,7 +151,7 @@ def post_cfg_function(args): (sag, _) = comfy.samplers.calc_cond_uncond_batch(model, uncond, None, degraded_noised, sigma, model_options) return cfg_result + (degraded - sag) * sag_scale - m.set_model_sampler_post_cfg_function(post_cfg_function) + m.set_model_sampler_post_cfg_function(post_cfg_function, disable_cfg1_optimization=True) # from diffusers: # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch From 
9a7619b72de8e9e6cbc2818d4deef0914539fbe3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 19 Dec 2023 02:32:59 -0500 Subject: [PATCH 333/420] Fix regression with inpaint model. --- comfy/samplers.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 47f3477876b..0453c1f6fda 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -599,6 +599,9 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model calculate_start_end_timesteps(model, negative) calculate_start_end_timesteps(model, positive) + if latent_image is not None: + latent_image = model.process_latent_in(latent_image) + if hasattr(model, 'extra_conds'): positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask) negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask) @@ -614,10 +617,6 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model apply_empty_x_to_equal_area(list(filter(lambda c: c.get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) - if latent_image is not None: - latent_image = model.process_latent_in(latent_image) - - extra_args = {"cond":positive, "uncond":negative, "cond_scale": cfg, "model_options": model_options, "seed":seed} samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar) From 40ea2bd01113f9fd46be8e6a61cded204155f9a4 Mon Sep 17 00:00:00 2001 From: Oleksiy Nehlyadyuk Date: Tue, 19 Dec 2023 17:07:55 +0300 Subject: [PATCH 334/420] Update requirements.txt the UI launches with one missing module `torchvision`. spits out a `ModuleNotFoundError`. installing `torchvision` module fixed it. --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 14524485a28..b698f2febcb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ torch torchsde +torchvision einops transformers>=4.25.1 safetensors>=0.3.0 From e65110fd93a3f9e4c378e87b26a9fc6c5c68cc2d Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 19 Dec 2023 20:22:01 +0000 Subject: [PATCH 335/420] Fix dom widgets not being hidden --- web/scripts/domWidget.js | 1 + 1 file changed, 1 insertion(+) diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index e919428a0b8..bb4c892b541 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -177,6 +177,7 @@ LGraphCanvas.prototype.computeVisibleNodes = function () { for (const w of node.widgets) { if (w.element) { w.element.hidden = hidden; + w.element.style.display = hidden ? 
"none" : null; if (hidden) { w.options.onHide?.(w); } From 8680ac3dfd51ab1276eb05d17ef8837e023f4a1f Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 19 Dec 2023 20:38:07 +0000 Subject: [PATCH 336/420] try to improve test reliability --- .github/workflows/test-ui.yaml | 2 +- tests-ui/afterSetup.js | 9 +++++++++ tests-ui/jest.config.js | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 tests-ui/afterSetup.js diff --git a/.github/workflows/test-ui.yaml b/.github/workflows/test-ui.yaml index 95069175517..4b8b9793479 100644 --- a/.github/workflows/test-ui.yaml +++ b/.github/workflows/test-ui.yaml @@ -22,5 +22,5 @@ jobs: run: | npm ci npm run test:generate - npm test + npm test -- --verbose working-directory: ./tests-ui diff --git a/tests-ui/afterSetup.js b/tests-ui/afterSetup.js new file mode 100644 index 00000000000..983f3af643c --- /dev/null +++ b/tests-ui/afterSetup.js @@ -0,0 +1,9 @@ +const { start } = require("./utils"); +const lg = require("./utils/litegraph"); + +// Load things once per test file before to ensure its all warmed up for the tests +beforeAll(async () => { + lg.setup(global); + await start({ resetEnv: true }); + lg.teardown(global); +}); diff --git a/tests-ui/jest.config.js b/tests-ui/jest.config.js index b5a5d646da7..86fff50574b 100644 --- a/tests-ui/jest.config.js +++ b/tests-ui/jest.config.js @@ -2,8 +2,10 @@ const config = { testEnvironment: "jsdom", setupFiles: ["./globalSetup.js"], + setupFilesAfterEnv: ["./afterSetup.js"], clearMocks: true, resetModules: true, + testTimeout: 10000 }; module.exports = config; From e82942cc293a7f707f3ba5611e33ec2284278268 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 20 Dec 2023 02:51:18 -0500 Subject: [PATCH 337/420] Add a denoise parameter to the SDTurboScheduler. --- comfy_extras/nodes_custom_sampler.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 008d0b8d6be..8791d8ae3c4 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -87,6 +87,7 @@ def INPUT_TYPES(s): return {"required": {"model": ("MODEL",), "steps": ("INT", {"default": 1, "min": 1, "max": 10}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}), } } RETURN_TYPES = ("SIGMAS",) @@ -94,8 +95,9 @@ def INPUT_TYPES(s): FUNCTION = "get_sigmas" - def get_sigmas(self, model, steps): - timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[:steps] + def get_sigmas(self, model, steps, denoise): + start_step = 10 - int(10 * denoise) + timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps] sigmas = model.model.model_sampling.sigma(timesteps) sigmas = torch.cat([sigmas, sigmas.new_zeros([1])]) return (sigmas, ) From 5f54614e7fa8a7ae493c7ac8a8c0677970cac908 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 20 Dec 2023 16:22:18 -0500 Subject: [PATCH 338/420] Add a RebatchImages node. 
--- comfy_extras/nodes_rebatch.py | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_rebatch.py b/comfy_extras/nodes_rebatch.py index 88a4ebe29f6..3010fbd4b69 100644 --- a/comfy_extras/nodes_rebatch.py +++ b/comfy_extras/nodes_rebatch.py @@ -99,10 +99,40 @@ def rebatch(self, latents, batch_size): return (output_list,) +class ImageRebatch: + @classmethod + def INPUT_TYPES(s): + return {"required": { "images": ("IMAGE",), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + }} + RETURN_TYPES = ("IMAGE",) + INPUT_IS_LIST = True + OUTPUT_IS_LIST = (True, ) + + FUNCTION = "rebatch" + + CATEGORY = "image/batch" + + def rebatch(self, images, batch_size): + batch_size = batch_size[0] + + output_list = [] + all_images = [] + for img in images: + for i in range(img.shape[0]): + all_images.append(img[i:i+1]) + + for i in range(0, len(all_images), batch_size): + output_list.append(torch.cat(all_images[i:i+batch_size], dim=0)) + + return (output_list,) + NODE_CLASS_MAPPINGS = { "RebatchLatents": LatentRebatch, + "RebatchImages": ImageRebatch, } NODE_DISPLAY_NAME_MAPPINGS = { "RebatchLatents": "Rebatch Latents", -} \ No newline at end of file + "RebatchImages": "Rebatch Images", +} From a1e1c69f7d555ae281ec46ca7a40c7195f3a249c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 20 Dec 2023 16:39:09 -0500 Subject: [PATCH 339/420] LoadImage now loads all the frames from animated images as a batch. --- nodes.py | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/nodes.py b/nodes.py index 7ed7a8e4a15..027bf55d994 100644 --- a/nodes.py +++ b/nodes.py @@ -9,7 +9,7 @@ import time import random -from PIL import Image, ImageOps +from PIL import Image, ImageOps, ImageSequence from PIL.PngImagePlugin import PngInfo import numpy as np import safetensors.torch @@ -1410,17 +1410,30 @@ def INPUT_TYPES(s): FUNCTION = "load_image" def load_image(self, image): image_path = folder_paths.get_annotated_filepath(image) - i = Image.open(image_path) - i = ImageOps.exif_transpose(i) - image = i.convert("RGB") - image = np.array(image).astype(np.float32) / 255.0 - image = torch.from_numpy(image)[None,] - if 'A' in i.getbands(): - mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 - mask = 1. - torch.from_numpy(mask) + img = Image.open(image_path) + output_images = [] + output_masks = [] + for i in ImageSequence.Iterator(img): + i = ImageOps.exif_transpose(i) + image = i.convert("RGB") + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + if 'A' in i.getbands(): + mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 + mask = 1. - torch.from_numpy(mask) + else: + mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") + output_images.append(image) + output_masks.append(mask.unsqueeze(0)) + + if len(output_images) > 1: + output_image = torch.cat(output_images, dim=0) + output_mask = torch.cat(output_masks, dim=0) else: - mask = torch.zeros((64,64), dtype=torch.float32, device="cpu") - return (image, mask.unsqueeze(0)) + output_image = output_images[0] + output_mask = output_masks[0] + + return (output_image, output_mask) @classmethod def IS_CHANGED(s, image): From 6781b181ef8ab8101e6bdf45a580509d6e4e1f7e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 21 Dec 2023 02:35:01 -0500 Subject: [PATCH 340/420] Fix potential tensor device issue with ImageCompositeMasked. 
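The bug class being fixed: destination, source and mask can arrive on different devices, and elementwise math between mismatched tensors raises a RuntimeError. Moving source and mask onto destination.device up front (with copy=True for the mask, since it is modified afterwards) avoids the crash without mutating the caller's tensor. A minimal illustration of the failure and the fix; it only triggers when a CUDA device is present:

    import torch

    a = torch.ones(4)                       # CPU
    if torch.cuda.is_available():
        b = torch.ones(4, device="cuda")    # GPU
        try:
            _ = a * b                       # RuntimeError: tensors on different devices
        except RuntimeError:
            _ = a * b.to(a.device)          # fix: align devices first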
--- comfy_extras/nodes_mask.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_mask.py b/comfy_extras/nodes_mask.py index d8c65c2b6b9..a7d164bf71d 100644 --- a/comfy_extras/nodes_mask.py +++ b/comfy_extras/nodes_mask.py @@ -6,6 +6,7 @@ from nodes import MAX_RESOLUTION def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False): + source = source.to(destination.device) if resize_source: source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear") @@ -20,7 +21,7 @@ def composite(destination, source, x, y, mask = None, multiplier = 8, resize_sou if mask is None: mask = torch.ones_like(source) else: - mask = mask.clone() + mask = mask.to(destination.device, copy=True) mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear") mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0]) From d35267e85a865c30a5fa63fdb0a21f94f4cc37e7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 21 Dec 2023 13:21:25 -0500 Subject: [PATCH 341/420] Litegraph updates. Update from upstream repo. Auto select value in prompt. Increase maximum number of nodes to 10k. --- web/lib/litegraph.core.js | 45 +++++++++++++++++++++++++++++++-------- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index f571edb30b8..434c4a83bf1 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -48,7 +48,7 @@ EVENT_LINK_COLOR: "#A86", CONNECTING_LINK_COLOR: "#AFA", - MAX_NUMBER_OF_NODES: 1000, //avoid infinite loops + MAX_NUMBER_OF_NODES: 10000, //avoid infinite loops DEFAULT_POSITION: [100, 100], //default node position VALID_SHAPES: ["default", "box", "round", "card"], //,"circle" @@ -3788,16 +3788,42 @@ /** * returns the bounding of the object, used for rendering purposes - * bounding is: [topleft_cornerx, topleft_cornery, width, height] * @method getBounding - * @return {Float32Array[4]} the total size + * @param out {Float32Array[4]?} [optional] a place to store the output, to free garbage + * @param compute_outer {boolean?} [optional] set to true to include the shadow and connection points in the bounding calculation + * @return {Float32Array[4]} the bounding box in format of [topleft_cornerx, topleft_cornery, width, height] */ - LGraphNode.prototype.getBounding = function(out) { + LGraphNode.prototype.getBounding = function(out, compute_outer) { out = out || new Float32Array(4); - out[0] = this.pos[0] - 4; - out[1] = this.pos[1] - LiteGraph.NODE_TITLE_HEIGHT; - out[2] = this.flags.collapsed ? (this._collapsed_width || LiteGraph.NODE_COLLAPSED_WIDTH) : this.size[0] + 4; - out[3] = this.flags.collapsed ? 
LiteGraph.NODE_TITLE_HEIGHT : this.size[1] + LiteGraph.NODE_TITLE_HEIGHT; + const nodePos = this.pos; + const isCollapsed = this.flags.collapsed; + const nodeSize = this.size; + + let left_offset = 0; + // 1 offset due to how nodes are rendered + let right_offset = 1 ; + let top_offset = 0; + let bottom_offset = 0; + + if (compute_outer) { + // 4 offset for collapsed node connection points + left_offset = 4; + // 6 offset for right shadow and collapsed node connection points + right_offset = 6 + left_offset; + // 4 offset for collapsed nodes top connection points + top_offset = 4; + // 5 offset for bottom shadow and collapsed node connection points + bottom_offset = 5 + top_offset; + } + + out[0] = nodePos[0] - left_offset; + out[1] = nodePos[1] - LiteGraph.NODE_TITLE_HEIGHT - top_offset; + out[2] = isCollapsed ? + (this._collapsed_width || LiteGraph.NODE_COLLAPSED_WIDTH) + right_offset : + nodeSize[0] + right_offset; + out[3] = isCollapsed ? + LiteGraph.NODE_TITLE_HEIGHT + bottom_offset : + nodeSize[1] + LiteGraph.NODE_TITLE_HEIGHT + bottom_offset; if (this.onBounding) { this.onBounding(out); @@ -7674,7 +7700,7 @@ LGraphNode.prototype.executeAction = function(action) continue; } - if (!overlapBounding(this.visible_area, n.getBounding(temp))) { + if (!overlapBounding(this.visible_area, n.getBounding(temp, true))) { continue; } //out of the visible area @@ -11336,6 +11362,7 @@ LGraphNode.prototype.executeAction = function(action) name_element.innerText = title; var value_element = dialog.querySelector(".value"); value_element.value = value; + value_element.select(); var input = value_element; input.addEventListener("keydown", function(e) { From 261bcbb0d933c3bf1fce02e6cc652936da2de1e0 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 22 Dec 2023 04:05:42 -0500 Subject: [PATCH 342/420] A few missing comfy ops in the VAE. 
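comfy.ops.disable_weight_init layers override reset_parameters to a no-op, so constructing the VAE skips PyTorch's default weight initialization for these convs, which is wasted work given that checkpoint weights are loaded immediately afterwards. The idea in miniature (a sketch of the pattern, not the comfy.ops class itself):

    import torch

    class NoInitConv2d(torch.nn.Conv2d):
        def reset_parameters(self):
            return None   # skip the default init; checkpoint weights overwrite these anyway

    conv = NoInitConv2d(8, 8, 1)   # constructed without running the initializer
    conv.load_state_dict({"weight": torch.zeros(8, 8, 1, 1), "bias": torch.zeros(8)})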
--- comfy/ldm/models/autoencoder.py | 5 +++-- comfy/ldm/modules/diffusionmodules/model.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py index d2f1d74a938..b91ec3249fb 100644 --- a/comfy/ldm/models/autoencoder.py +++ b/comfy/ldm/models/autoencoder.py @@ -8,6 +8,7 @@ from comfy.ldm.util import instantiate_from_config from comfy.ldm.modules.ema import LitEma +import comfy.ops class DiagonalGaussianRegularizer(torch.nn.Module): def __init__(self, sample: bool = True): @@ -161,12 +162,12 @@ def __init__(self, embed_dim: int, **kwargs): }, **kwargs, ) - self.quant_conv = torch.nn.Conv2d( + self.quant_conv = comfy.ops.disable_weight_init.Conv2d( (1 + ddconfig["double_z"]) * ddconfig["z_channels"], (1 + ddconfig["double_z"]) * embed_dim, 1, ) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.post_quant_conv = comfy.ops.disable_weight_init.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim def get_autoencoder_params(self) -> list: diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index fce29cb85ec..cc81c1f231c 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -41,7 +41,7 @@ def nonlinearity(x): def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + return ops.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) class Upsample(nn.Module): From 36a7953142ccf3f9debf9305e3cbeb3bfe956ee3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 22 Dec 2023 14:24:04 -0500 Subject: [PATCH 343/420] Greatly improve lowvram sampling speed by getting rid of accelerate. Let me know if this breaks anything. 
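The replacement for accelerate's hook-based dispatch is a greedy first-fit placement: walk the modules that support weight casting, total up each one's parameter bytes, and move modules onto the GPU until the lowvram budget is spent; every such module also has comfy_cast_weights enabled so weights are cast (and, for offloaded modules, transferred) per forward call. The budgeting loop reduced to its essentials, as a sketch rather than the exact model_load code:

    import torch

    def fill_gpu_budget(model, device, budget_bytes):
        mem_counter = 0
        for m in model.modules():
            if not hasattr(m, "comfy_cast_weights"):
                continue
            m.comfy_cast_weights = True   # stream-cast weights at forward time
            module_mem = sum(t.nelement() * t.element_size() for t in m.state_dict().values())
            if mem_counter + module_mem < budget_bytes:
                m.to(device)              # this module's weights stay resident on the GPU
                mem_counter += module_mem
        return mem_counter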
--- comfy/controlnet.py | 2 +- comfy/model_base.py | 6 +-- comfy/model_management.py | 52 ++++++++++++---------- comfy/ops.py | 92 ++++++++++++++++++++++++++++++--------- requirements.txt | 1 - 5 files changed, 103 insertions(+), 50 deletions(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 110b5c7c290..8404054f38f 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -283,7 +283,7 @@ class control_lora_ops(ControlLoraOps, comfy.ops.manual_cast): cm = self.control_model.state_dict() for k in sd: - weight = comfy.model_management.resolve_lowvram_weight(sd[k], diffusion_model, k) + weight = sd[k] try: comfy.utils.set_attr(self.control_model, k, weight) except: diff --git a/comfy/model_base.py b/comfy/model_base.py index f2a6f9841f8..b3a1fcd51f0 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -162,11 +162,7 @@ def process_latent_out(self, latent): def state_dict_for_saving(self, clip_state_dict, vae_state_dict): clip_state_dict = self.model_config.process_clip_state_dict_for_saving(clip_state_dict) - unet_sd = self.diffusion_model.state_dict() - unet_state_dict = {} - for k in unet_sd: - unet_state_dict[k] = comfy.model_management.resolve_lowvram_weight(unet_sd[k], self.diffusion_model, k) - + unet_state_dict = self.diffusion_model.state_dict() unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict) vae_state_dict = self.model_config.process_vae_state_dict_for_saving(vae_state_dict) if self.get_dtype() == torch.float16: diff --git a/comfy/model_management.py b/comfy/model_management.py index 23f39c98510..61c967f6451 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -218,15 +218,8 @@ def is_nvidia(): FORCE_FP16 = True if lowvram_available: - try: - import accelerate - if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM): - vram_state = set_vram_to - except Exception as e: - import traceback - print(traceback.format_exc()) - print("ERROR: LOW VRAM MODE NEEDS accelerate.") - lowvram_available = False + if set_vram_to in (VRAMState.LOW_VRAM, VRAMState.NO_VRAM): + vram_state = set_vram_to if cpu_state != CPUState.GPU: @@ -298,8 +291,20 @@ def model_load(self, lowvram_model_memory=0): if lowvram_model_memory > 0: print("loading in lowvram mode", lowvram_model_memory/(1024 * 1024)) - device_map = accelerate.infer_auto_device_map(self.real_model, max_memory={0: "{}MiB".format(lowvram_model_memory // (1024 * 1024)), "cpu": "16GiB"}) - accelerate.dispatch_model(self.real_model, device_map=device_map, main_device=self.device) + mem_counter = 0 + for m in self.real_model.modules(): + if hasattr(m, "comfy_cast_weights"): + m.prev_comfy_cast_weights = m.comfy_cast_weights + m.comfy_cast_weights = True + module_mem = 0 + sd = m.state_dict() + for k in sd: + t = sd[k] + module_mem += t.nelement() * t.element_size() + if mem_counter + module_mem < lowvram_model_memory: + m.to(self.device) + mem_counter += module_mem + self.model_accelerated = True if is_intel_xpu() and not args.disable_ipex_optimize: @@ -309,7 +314,11 @@ def model_load(self, lowvram_model_memory=0): def model_unload(self): if self.model_accelerated: - accelerate.hooks.remove_hook_from_submodules(self.real_model) + for m in self.real_model.modules(): + if hasattr(m, "prev_comfy_cast_weights"): + m.comfy_cast_weights = m.prev_comfy_cast_weights + del m.prev_comfy_cast_weights + self.model_accelerated = False self.model.unpatch_model(self.model.offload_device) @@ -402,14 +411,14 @@ def load_models_gpu(models, memory_required=0): if lowvram_available 
and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM): model_size = loaded_model.model_memory_required(torch_dev) current_free_mem = get_free_memory(torch_dev) - lowvram_model_memory = int(max(256 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 )) + lowvram_model_memory = int(max(64 * (1024 * 1024), (current_free_mem - 1024 * (1024 * 1024)) / 1.3 )) if model_size > (current_free_mem - inference_memory): #only switch to lowvram if really necessary vram_set_state = VRAMState.LOW_VRAM else: lowvram_model_memory = 0 if vram_set_state == VRAMState.NO_VRAM: - lowvram_model_memory = 256 * 1024 * 1024 + lowvram_model_memory = 64 * 1024 * 1024 cur_loaded_model = loaded_model.model_load(lowvram_model_memory) current_loaded_models.insert(0, loaded_model) @@ -566,6 +575,11 @@ def supports_dtype(device, dtype): #TODO return True return False +def device_supports_non_blocking(device): + if is_device_mps(device): + return False #pytorch bug? mps doesn't support non blocking + return True + def cast_to_device(tensor, device, dtype, copy=False): device_supports_cast = False if tensor.dtype == torch.float32 or tensor.dtype == torch.float16: @@ -576,9 +590,7 @@ def cast_to_device(tensor, device, dtype, copy=False): elif is_intel_xpu(): device_supports_cast = True - non_blocking = True - if is_device_mps(device): - non_blocking = False #pytorch bug? mps doesn't support non blocking + non_blocking = device_supports_non_blocking(device) if device_supports_cast: if copy: @@ -742,11 +754,7 @@ def soft_empty_cache(force=False): torch.cuda.empty_cache() torch.cuda.ipc_collect() -def resolve_lowvram_weight(weight, model, key): - if weight.device == torch.device("meta"): #lowvram NOTE: this depends on the inner working of the accelerate library so it might break. - key_split = key.split('.') # I have no idea why they don't just leave the weight there instead of using the meta device. 
- op = comfy.utils.get_attr(model, '.'.join(key_split[:-1])) - weight = op._hf_hook.weights_map[key_split[-1]] +def resolve_lowvram_weight(weight, model, key): #TODO: remove return weight #TODO: might be cleaner to put this somewhere else diff --git a/comfy/ops.py b/comfy/ops.py index 08c63384789..f6f85de60a1 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -1,27 +1,93 @@ import torch from contextlib import contextmanager +import comfy.model_management + +def cast_bias_weight(s, input): + bias = None + non_blocking = comfy.model_management.device_supports_non_blocking(input.device) + if s.bias is not None: + bias = s.bias.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking) + weight = s.weight.to(device=input.device, dtype=input.dtype, non_blocking=non_blocking) + return weight, bias + class disable_weight_init: class Linear(torch.nn.Linear): + comfy_cast_weights = False def reset_parameters(self): return None + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.linear(input, weight, bias) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + class Conv2d(torch.nn.Conv2d): + comfy_cast_weights = False def reset_parameters(self): return None + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return self._conv_forward(input, weight, bias) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + class Conv3d(torch.nn.Conv3d): + comfy_cast_weights = False def reset_parameters(self): return None + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return self._conv_forward(input, weight, bias) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + class GroupNorm(torch.nn.GroupNorm): + comfy_cast_weights = False def reset_parameters(self): return None + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + + class LayerNorm(torch.nn.LayerNorm): + comfy_cast_weights = False def reset_parameters(self): return None + def forward_comfy_cast_weights(self, input): + weight, bias = cast_bias_weight(self, input) + return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) + + def forward(self, *args, **kwargs): + if self.comfy_cast_weights: + return self.forward_comfy_cast_weights(*args, **kwargs) + else: + return super().forward(*args, **kwargs) + @classmethod def conv_nd(s, dims, *args, **kwargs): if dims == 2: @@ -31,35 +97,19 @@ def conv_nd(s, dims, *args, **kwargs): else: raise ValueError(f"unsupported dimensions: {dims}") -def cast_bias_weight(s, input): - bias = None - if s.bias is not None: - bias = s.bias.to(device=input.device, dtype=input.dtype) - weight = s.weight.to(device=input.device, dtype=input.dtype) - return weight, bias class manual_cast(disable_weight_init): class Linear(disable_weight_init.Linear): - def 
forward(self, input): - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.linear(input, weight, bias) + comfy_cast_weights = True class Conv2d(disable_weight_init.Conv2d): - def forward(self, input): - weight, bias = cast_bias_weight(self, input) - return self._conv_forward(input, weight, bias) + comfy_cast_weights = True class Conv3d(disable_weight_init.Conv3d): - def forward(self, input): - weight, bias = cast_bias_weight(self, input) - return self._conv_forward(input, weight, bias) + comfy_cast_weights = True class GroupNorm(disable_weight_init.GroupNorm): - def forward(self, input): - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps) + comfy_cast_weights = True class LayerNorm(disable_weight_init.LayerNorm): - def forward(self, input): - weight, bias = cast_bias_weight(self, input) - return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps) + comfy_cast_weights = True diff --git a/requirements.txt b/requirements.txt index 14524485a28..da1fbb27e0c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,6 @@ einops transformers>=4.25.1 safetensors>=0.3.0 aiohttp -accelerate pyyaml Pillow scipy From a252963f956a7d76344e3f0ce24b1047480a25af Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 23 Dec 2023 04:25:06 -0500 Subject: [PATCH 344/420] --disable-smart-memory now unloads everything like it did originally. --- comfy/model_management.py | 4 ++++ execution.py | 2 ++ 2 files changed, 6 insertions(+) diff --git a/comfy/model_management.py b/comfy/model_management.py index 61c967f6451..3adc42702c8 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -754,6 +754,10 @@ def soft_empty_cache(force=False): torch.cuda.empty_cache() torch.cuda.ipc_collect() +def unload_all_models(): + free_memory(1e30, get_torch_device()) + + def resolve_lowvram_weight(weight, model, key): #TODO: remove return weight diff --git a/execution.py b/execution.py index 7db1f095b10..7ad171313b0 100644 --- a/execution.py +++ b/execution.py @@ -382,6 +382,8 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): for x in executed: self.old_prompt[x] = copy.deepcopy(prompt[x]) self.server.last_node_id = None + if comfy.model_management.DISABLE_SMART_MEMORY: + comfy.model_management.unload_all_models() From d0165d819afe76bd4e6bdd710eb5f3e571b6a804 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 24 Dec 2023 07:06:59 -0500 Subject: [PATCH 345/420] Fix SVD lowvram mode. 
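Same lowvram theme as the ops changes above: with parts of the model offloaded, registered buffers such as mix_factor can live on a different device than the activations, so they are now moved with .to(...) at the point of use. The pattern in isolation (sketch):

    import torch

    class AlphaBlend(torch.nn.Module):
        def __init__(self):
            super().__init__()
            # In lowvram mode this buffer can be left on the CPU while x is on the GPU.
            self.register_buffer("mix_factor", torch.tensor([0.5]))

        def forward(self, x, x_mix):
            alpha = torch.sigmoid(self.mix_factor.to(x.device))  # follow the activations
            return alpha * x + (1.0 - alpha) * x_mix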
--- comfy/ldm/modules/diffusionmodules/util.py | 6 +++--- comfy/ldm/modules/temporal_ae.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/util.py b/comfy/ldm/modules/diffusionmodules/util.py index 68175b62a58..ac7e27173bd 100644 --- a/comfy/ldm/modules/diffusionmodules/util.py +++ b/comfy/ldm/modules/diffusionmodules/util.py @@ -51,9 +51,9 @@ def get_alpha(self, image_only_indicator: torch.Tensor) -> torch.Tensor: if self.merge_strategy == "fixed": # make shape compatible # alpha = repeat(self.mix_factor, '1 -> b () t () ()', t=t, b=bs) - alpha = self.mix_factor + alpha = self.mix_factor.to(image_only_indicator.device) elif self.merge_strategy == "learned": - alpha = torch.sigmoid(self.mix_factor) + alpha = torch.sigmoid(self.mix_factor.to(image_only_indicator.device)) # make shape compatible # alpha = repeat(alpha, '1 -> s () ()', s = t * bs) elif self.merge_strategy == "learned_with_images": @@ -61,7 +61,7 @@ def get_alpha(self, image_only_indicator: torch.Tensor) -> torch.Tensor: alpha = torch.where( image_only_indicator.bool(), torch.ones(1, 1, device=image_only_indicator.device), - rearrange(torch.sigmoid(self.mix_factor), "... -> ... 1"), + rearrange(torch.sigmoid(self.mix_factor.to(image_only_indicator.device)), "... -> ... 1"), ) alpha = rearrange(alpha, self.rearrange_pattern) # make shape compatible diff --git a/comfy/ldm/modules/temporal_ae.py b/comfy/ldm/modules/temporal_ae.py index 7ea68dc9e28..2992aeafc35 100644 --- a/comfy/ldm/modules/temporal_ae.py +++ b/comfy/ldm/modules/temporal_ae.py @@ -82,14 +82,14 @@ def forward(self, x, temb, skip_video=False, timesteps=None): x = self.time_stack(x, temb) - alpha = self.get_alpha(bs=b // timesteps) + alpha = self.get_alpha(bs=b // timesteps).to(x.device) x = alpha * x + (1.0 - alpha) * x_mix x = rearrange(x, "b c t h w -> (b t) c h w") return x -class AE3DConv(torch.nn.Conv2d): +class AE3DConv(ops.Conv2d): def __init__(self, in_channels, out_channels, video_kernel_size=3, *args, **kwargs): super().__init__(in_channels, out_channels, *args, **kwargs) if isinstance(video_kernel_size, Iterable): @@ -97,7 +97,7 @@ def __init__(self, in_channels, out_channels, video_kernel_size=3, *args, **kwar else: padding = int(video_kernel_size // 2) - self.time_mix_conv = torch.nn.Conv3d( + self.time_mix_conv = ops.Conv3d( in_channels=out_channels, out_channels=out_channels, kernel_size=video_kernel_size, @@ -167,7 +167,7 @@ def forward(self, x, timesteps=None, skip_time_block=False): emb = emb[:, None, :] x_mix = x_mix + emb - alpha = self.get_alpha() + alpha = self.get_alpha().to(x.device) x_mix = self.time_mix_block(x_mix, timesteps=timesteps) x = alpha * x + (1.0 - alpha) * x_mix # alpha merge From 392878a2621d131ac9e856fb2d428d9c6e2a022e Mon Sep 17 00:00:00 2001 From: shiimizu Date: Mon, 25 Dec 2023 19:17:40 -0800 Subject: [PATCH 346/420] Fix hiding dom widgets. --- web/scripts/domWidget.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index bb4c892b541..eb0742d3882 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -177,7 +177,7 @@ LGraphCanvas.prototype.computeVisibleNodes = function () { for (const w of node.widgets) { if (w.element) { w.element.hidden = hidden; - w.element.style.display = hidden ? "none" : null; + w.element.style.display = hidden ? 
"none" : undefined; if (hidden) { w.options.onHide?.(w); } From 61b3f15f8f2bc0822cb98eac48742fb32f6af396 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 26 Dec 2023 05:02:02 -0500 Subject: [PATCH 347/420] Fix lowvram mode not working with unCLIP and Revision code. --- comfy/ldm/modules/diffusionmodules/upscaling.py | 4 ++-- comfy/ldm/modules/encoders/noise_aug_modules.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/upscaling.py b/comfy/ldm/modules/diffusionmodules/upscaling.py index 709a7f52e06..768a47f9c29 100644 --- a/comfy/ldm/modules/diffusionmodules/upscaling.py +++ b/comfy/ldm/modules/diffusionmodules/upscaling.py @@ -43,8 +43,8 @@ def register_schedule(self, beta_schedule="linear", timesteps=1000, def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + return (extract_into_tensor(self.sqrt_alphas_cumprod.to(x_start.device), t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod.to(x_start.device), t, x_start.shape) * noise) def forward(self, x): return x, None diff --git a/comfy/ldm/modules/encoders/noise_aug_modules.py b/comfy/ldm/modules/encoders/noise_aug_modules.py index b59bf204bc9..66767b5874a 100644 --- a/comfy/ldm/modules/encoders/noise_aug_modules.py +++ b/comfy/ldm/modules/encoders/noise_aug_modules.py @@ -15,12 +15,12 @@ def __init__(self, *args, clip_stats_path=None, timestep_dim=256, **kwargs): def scale(self, x): # re-normalize to centered mean and unit variance - x = (x - self.data_mean) * 1. / self.data_std + x = (x - self.data_mean.to(x.device)) * 1. / self.data_std.to(x.device) return x def unscale(self, x): # back to original data stats - x = (x * self.data_std) + self.data_mean + x = (x * self.data_std.to(x.device)) + self.data_mean.to(x.device) return x def forward(self, x, noise_level=None): From f21bb41787ce590ea6eff16163ee83404d9ff0d5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 26 Dec 2023 12:52:21 -0500 Subject: [PATCH 348/420] Fix taesd VAE in lowvram mode. --- comfy/taesd/taesd.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/comfy/taesd/taesd.py b/comfy/taesd/taesd.py index 46f3097a2a1..8f96c54e56a 100644 --- a/comfy/taesd/taesd.py +++ b/comfy/taesd/taesd.py @@ -7,9 +7,10 @@ import torch.nn as nn import comfy.utils +import comfy.ops def conv(n_in, n_out, **kwargs): - return nn.Conv2d(n_in, n_out, 3, padding=1, **kwargs) + return comfy.ops.disable_weight_init.Conv2d(n_in, n_out, 3, padding=1, **kwargs) class Clamp(nn.Module): def forward(self, x): @@ -19,7 +20,7 @@ class Block(nn.Module): def __init__(self, n_in, n_out): super().__init__() self.conv = nn.Sequential(conv(n_in, n_out), nn.ReLU(), conv(n_out, n_out), nn.ReLU(), conv(n_out, n_out)) - self.skip = nn.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity() + self.skip = comfy.ops.disable_weight_init.Conv2d(n_in, n_out, 1, bias=False) if n_in != n_out else nn.Identity() self.fuse = nn.ReLU() def forward(self, x): return self.fuse(self.conv(x) + self.skip(x)) From f15dce71fde5eee12a5689e86468368a1791d200 Mon Sep 17 00:00:00 2001 From: AYF Date: Wed, 27 Dec 2023 00:55:11 -0500 Subject: [PATCH 349/420] Add title to the API workflow json. (#2380) * Add `title` to the API workflow json. 
* API: Move `title` to `_meta` dictionary, imply unused. --- web/scripts/app.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/web/scripts/app.js b/web/scripts/app.js index 62169abfb82..73dba65cc29 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1784,6 +1784,10 @@ export class ComfyApp { output[String(node.id)] = { inputs, class_type: node.comfyClass, + // Ignored by the backend. + "_meta": { + title: node.title, + }, }; } } From e478b1794e91977c50dc6eea6228ef1248044507 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 27 Dec 2023 01:07:02 -0500 Subject: [PATCH 350/420] Only add _meta title to api prompt when dev mode is enabled in UI. --- web/scripts/app.js | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 73dba65cc29..62b71c0a13c 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1781,14 +1781,19 @@ export class ComfyApp { } } - output[String(node.id)] = { + let node_data = { inputs, class_type: node.comfyClass, + }; + + if (this.ui.settings.getSettingValue("Comfy.DevMode")) { // Ignored by the backend. - "_meta": { + node_data["_meta"] = { title: node.title, - }, - }; + } + } + + output[String(node.id)] = node_data; } } From c782144433e41c21ae2dfd75d0bc28255d2e966d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 27 Dec 2023 13:50:57 -0500 Subject: [PATCH 351/420] Fix clip vision lowvram mode not working. --- comfy/clip_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/clip_model.py b/comfy/clip_model.py index 850b5fdbecb..7397b7a2637 100644 --- a/comfy/clip_model.py +++ b/comfy/clip_model.py @@ -151,7 +151,7 @@ def __init__(self, embed_dim, num_channels=3, patch_size=14, image_size=224, dty def forward(self, pixel_values): embeds = self.patch_embedding(pixel_values).flatten(2).transpose(1, 2) - return torch.cat([self.class_embedding.expand(pixel_values.shape[0], 1, -1), embeds], dim=1) + self.position_embedding.weight + return torch.cat([self.class_embedding.to(embeds.device).expand(pixel_values.shape[0], 1, -1), embeds], dim=1) + self.position_embedding.weight.to(embeds.device) class CLIPVision(torch.nn.Module): From a8baa40d85aafb7d0d33221ce86eb6ca1402b4c7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Dec 2023 12:23:07 -0500 Subject: [PATCH 352/420] Cleanup. --- .vscode/settings.json | 9 --------- 1 file changed, 9 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 202121e10fc..00000000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "path-intellisense.mappings": { - "../": "${workspaceFolder}/web/extensions/core" - }, - "[python]": { - "editor.defaultFormatter": "ms-python.autopep8" - }, - "python.formatting.provider": "none" -} From e1e322cf69319d125680d791822d8f4733fea027 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Dec 2023 21:41:10 -0500 Subject: [PATCH 353/420] Load weights that can't be lowvramed to target device. 
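The new module_size() helper computes a module's memory footprint directly from its state dict, which is what decides whether a block still fits inside the lowvram budget. A small worked example of the same arithmetic (the layer here is arbitrary):

    import torch

    def module_size(module):
        # bytes = number of elements * bytes per element, summed over
        # every tensor (weights and biases) in the state dict
        return sum(t.nelement() * t.element_size()
                   for t in module.state_dict().values())

    lin = torch.nn.Linear(1024, 1024).half()
    # (1024*1024 weights + 1024 biases) * 2 bytes = 2,099,200 bytes (~2 MiB)
    print(module_size(lin))

Modules without comfy_cast_weights support cannot be left in lowvram mode, so they are now moved to the target device outright and counted against the same budget.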
--- comfy/model_management.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 3adc42702c8..c0cb4130c6c 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -259,6 +259,14 @@ def get_torch_device_name(device): current_loaded_models = [] +def module_size(module): + module_mem = 0 + sd = module.state_dict() + for k in sd: + t = sd[k] + module_mem += t.nelement() * t.element_size() + return module_mem + class LoadedModel: def __init__(self, model): self.model = model @@ -296,14 +304,14 @@ def model_load(self, lowvram_model_memory=0): if hasattr(m, "comfy_cast_weights"): m.prev_comfy_cast_weights = m.comfy_cast_weights m.comfy_cast_weights = True - module_mem = 0 - sd = m.state_dict() - for k in sd: - t = sd[k] - module_mem += t.nelement() * t.element_size() + module_mem = module_size(m) if mem_counter + module_mem < lowvram_model_memory: m.to(self.device) mem_counter += module_mem + elif hasattr(m, "weight"): #only modules with comfy_cast_weights can be set to lowvram mode + m.to(self.device) + mem_counter += module_size(m) + print("lowvram: loaded module regularly", m) self.model_accelerated = True From 12e822c6c8a9019abd1127e0e61f1405de8d14e3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 28 Dec 2023 21:46:20 -0500 Subject: [PATCH 354/420] Use function to calculate model size in model patcher. --- comfy/model_patcher.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index 6acb2d647c0..b1b5ea6a811 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -28,13 +28,9 @@ def model_size(self): if self.size > 0: return self.size model_sd = self.model.state_dict() - size = 0 - for k in model_sd: - t = model_sd[k] - size += t.nelement() * t.element_size() - self.size = size + self.size = comfy.model_management.module_size(self.model) self.model_keys = set(model_sd.keys()) - return size + return self.size def clone(self): n = ModelPatcher(self.model, self.load_device, self.offload_device, self.size, self.current_device, weight_inplace_update=self.weight_inplace_update) From 04b713dda1c4109f84386b17b0f7c25722f0ae15 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 29 Dec 2023 17:33:30 -0500 Subject: [PATCH 355/420] Fix VALIDATE_INPUTS getting called multiple times. Allow VALIDATE_INPUTS to only validate specific inputs. 
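The executor now reads the signature of a node's VALIDATE_INPUTS with inspect.getfullargspec, skips the built-in checks only for the inputs the function actually names, and calls it once with just those inputs instead of once per input. A hedged sketch of what a custom node sees under the new contract (ExampleNode is hypothetical):

    import inspect

    class ExampleNode:
        # Only "image" is named here, so the built-in validation is skipped
        # for "image" alone; every other input (e.g. "channel") is still
        # checked against its input config, as in the nodes.py change below.
        @classmethod
        def VALIDATE_INPUTS(s, image):
            if not image.endswith(".png"):
                return "Invalid image file: {}".format(image)
            return True

    # How the executor discovers which inputs the node validates itself:
    print(inspect.getfullargspec(ExampleNode.VALIDATE_INPUTS).args)  # ['s', 'image']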
--- execution.py | 58 +++++++++++++++++++++++++++++++--------------------- nodes.py | 5 +---- 2 files changed, 36 insertions(+), 27 deletions(-) diff --git a/execution.py b/execution.py index 7ad171313b0..53ba2e0f8a3 100644 --- a/execution.py +++ b/execution.py @@ -7,6 +7,7 @@ import heapq import traceback import gc +import inspect import torch import nodes @@ -402,6 +403,10 @@ def validate_inputs(prompt, item, validated): errors = [] valid = True + validate_function_inputs = [] + if hasattr(obj_class, "VALIDATE_INPUTS"): + validate_function_inputs = inspect.getfullargspec(obj_class.VALIDATE_INPUTS).args + for x in required_inputs: if x not in inputs: error = { @@ -531,29 +536,7 @@ def validate_inputs(prompt, item, validated): errors.append(error) continue - if hasattr(obj_class, "VALIDATE_INPUTS"): - input_data_all = get_input_data(inputs, obj_class, unique_id) - #ret = obj_class.VALIDATE_INPUTS(**input_data_all) - ret = map_node_over_list(obj_class, input_data_all, "VALIDATE_INPUTS") - for i, r in enumerate(ret): - if r is not True: - details = f"{x}" - if r is not False: - details += f" - {str(r)}" - - error = { - "type": "custom_validation_failed", - "message": "Custom validation failed for node", - "details": details, - "extra_info": { - "input_name": x, - "input_config": info, - "received_value": val, - } - } - errors.append(error) - continue - else: + if x not in validate_function_inputs: if isinstance(type_input, list): if val not in type_input: input_config = info @@ -580,6 +563,35 @@ def validate_inputs(prompt, item, validated): errors.append(error) continue + if len(validate_function_inputs) > 0: + input_data_all = get_input_data(inputs, obj_class, unique_id) + input_filtered = {} + for x in input_data_all: + if x in validate_function_inputs: + input_filtered[x] = input_data_all[x] + + #ret = obj_class.VALIDATE_INPUTS(**input_filtered) + ret = map_node_over_list(obj_class, input_filtered, "VALIDATE_INPUTS") + for x in input_filtered: + for i, r in enumerate(ret): + if r is not True: + details = f"{x}" + if r is not False: + details += f" - {str(r)}" + + error = { + "type": "custom_validation_failed", + "message": "Custom validation failed for node", + "details": details, + "extra_info": { + "input_name": x, + "input_config": info, + "received_value": val, + } + } + errors.append(error) + continue + if len(errors) > 0 or valid is not True: ret = (False, errors, unique_id) else: diff --git a/nodes.py b/nodes.py index 027bf55d994..8e3ec947cd4 100644 --- a/nodes.py +++ b/nodes.py @@ -1491,13 +1491,10 @@ def IS_CHANGED(s, image, channel): return m.digest().hex() @classmethod - def VALIDATE_INPUTS(s, image, channel): + def VALIDATE_INPUTS(s, image): if not folder_paths.exists_annotated_filepath(image): return "Invalid image file: {}".format(image) - if channel not in s._color_channels: - return "Invalid color channel: {}".format(channel) - return True class ImageScale: From 144e6580a4a43d5390769665a5032bb584481ff1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 29 Dec 2023 17:47:24 -0500 Subject: [PATCH 356/420] This cache timeout is pretty useless in practice. 
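What remains after dropping the half-second grace window is the per-folder check that follows in this function: a cached listing is only reused while the recorded modification times still match. A minimal sketch of that invalidation, assuming the cache maps each watched folder to the mtime recorded when the listing was built:

    import os

    def cache_is_stale(cached_mtimes):
        # stale as soon as any watched folder changed on disk
        for folder, recorded in cached_mtimes.items():
            if os.path.exists(folder) and os.path.getmtime(folder) != recorded:
                return True
        return False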
--- folder_paths.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/folder_paths.py b/folder_paths.py index 98704945e56..a8726d8dd04 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -184,8 +184,7 @@ def cached_filename_list_(folder_name): if folder_name not in filename_list_cache: return None out = filename_list_cache[folder_name] - if time.perf_counter() < (out[2] + 0.5): - return out + for x in out[1]: time_modified = out[1][x] folder = x From 1b103e0cb2d7aeb05fc8b7e006d4438e7bceca20 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 30 Dec 2023 05:38:21 -0500 Subject: [PATCH 357/420] Add argument to run the VAE on the CPU. --- comfy/cli_args.py | 2 ++ comfy/model_management.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 8de0adb53ee..50d7b62fa95 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -66,6 +66,8 @@ def __call__(self, parser, namespace, values, option_string=None): fpvae_group.add_argument("--fp32-vae", action="store_true", help="Run the VAE in full precision fp32.") fpvae_group.add_argument("--bf16-vae", action="store_true", help="Run the VAE in bf16.") +parser.add_argument("--cpu-vae", action="store_true", help="Run the VAE on the CPU.") + fpte_group = parser.add_mutually_exclusive_group() fpte_group.add_argument("--fp8_e4m3fn-text-enc", action="store_true", help="Store text encoder weights in fp8 (e4m3fn variant).") fpte_group.add_argument("--fp8_e5m2-text-enc", action="store_true", help="Store text encoder weights in fp8 (e5m2 variant).") diff --git a/comfy/model_management.py b/comfy/model_management.py index c0cb4130c6c..fefd3c8c99d 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -186,6 +186,9 @@ def is_nvidia(): if is_intel_xpu(): VAE_DTYPE = torch.bfloat16 +if args.cpu_vae: + VAE_DTYPE = torch.float32 + if args.fp16_vae: VAE_DTYPE = torch.float16 elif args.bf16_vae: @@ -555,6 +558,8 @@ def intermediate_device(): return torch.device("cpu") def vae_device(): + if args.cpu_vae: + return torch.device("cpu") return get_torch_device() def vae_offload_device(): From 36e15f2507ee81e27140cf15ffcda40070968928 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 31 Dec 2023 05:05:14 -0500 Subject: [PATCH 358/420] Reregister nodes when pressing refresh button. --- web/scripts/app.js | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 62b71c0a13c..7353f5a3bdd 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -2020,12 +2020,8 @@ export class ComfyApp { async refreshComboInNodes() { const defs = await api.getNodeDefs(); - for(const nodeId in LiteGraph.registered_node_types) { - const node = LiteGraph.registered_node_types[nodeId]; - const nodeDef = defs[nodeId]; - if(!nodeDef) continue; - - node.nodeData = nodeDef; + for (const nodeId in defs) { + this.registerNodeDef(nodeId, defs[nodeId]); } for(let nodeNum in this.graph._nodes) { From d1f3637a5a944d0607b899babd8ff11d87100503 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 31 Dec 2023 15:37:20 -0500 Subject: [PATCH 359/420] Add a denoise parameter to BasicScheduler node. 
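With denoise below 1.0 the node computes the schedule at full length and then keeps only its tail, so sampling starts from the noise level a full run would have reached partway through. Worked through with illustrative numbers:

    steps = 20
    denoise = 0.5
    total_steps = int(steps / denoise)  # 40: the schedule is built at this length
    # calculate_sigmas_scheduler() returns one more sigma than steps (the
    # trailing 0.0); keeping the final steps + 1 of them (21 here) means the
    # sampler runs the last 20 steps of a 40-step schedule.
    keep = steps + 1                    # sigmas = sigmas[-(steps + 1):]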
--- comfy_extras/nodes_custom_sampler.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index 8791d8ae3c4..d5f9ba0070a 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -13,6 +13,7 @@ def INPUT_TYPES(s): {"model": ("MODEL",), "scheduler": (comfy.samplers.SCHEDULER_NAMES, ), "steps": ("INT", {"default": 20, "min": 1, "max": 10000}), + "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), } } RETURN_TYPES = ("SIGMAS",) @@ -20,8 +21,13 @@ def INPUT_TYPES(s): FUNCTION = "get_sigmas" - def get_sigmas(self, model, scheduler, steps): - sigmas = comfy.samplers.calculate_sigmas_scheduler(model.model, scheduler, steps).cpu() + def get_sigmas(self, model, scheduler, steps, denoise): + total_steps = steps + if denoise < 1.0: + total_steps = int(steps/denoise) + + sigmas = comfy.samplers.calculate_sigmas_scheduler(model.model, scheduler, total_steps).cpu() + sigmas = sigmas[-(steps + 1):] return (sigmas, ) From 66831eb6e96cd974fb2d0fc4f299b23c6af16685 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 1 Jan 2024 14:27:56 -0500 Subject: [PATCH 360/420] Add node id and prompt id to websocket progress packet. --- main.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index f6aeceed2af..bcf73872980 100644 --- a/main.py +++ b/main.py @@ -106,6 +106,8 @@ def prompt_worker(q, server): item, item_id = queue_item execution_start_time = time.perf_counter() prompt_id = item[1] + server.last_prompt_id = prompt_id + e.execute(item[2], prompt_id, item[3], item[4]) need_gc = True q.task_done(item_id, e.outputs_ui) @@ -131,7 +133,9 @@ async def run(server, address='', port=8188, verbose=True, call_on_start=None): def hijack_progress(server): def hook(value, total, preview_image): comfy.model_management.throw_exception_if_processing_interrupted() - server.send_sync("progress", {"value": value, "max": total}, server.client_id) + progress = {"value": value, "max": total, "prompt_id": server.last_prompt_id, "node": server.last_node_id} + + server.send_sync("progress", progress, server.client_id) if preview_image is not None: server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id) comfy.utils.set_progress_bar_global_hook(hook) From 79f73a4b33c76867098b182cb0db1b657b2996f5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 2 Jan 2024 01:50:29 -0500 Subject: [PATCH 361/420] Remove useless code. --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index 057dd16b250..cb0a7983515 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -437,9 +437,6 @@ def __init__( operations=ops, ): super().__init__() - assert use_spatial_transformer == True, "use_spatial_transformer has to be true" - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
@@ -456,7 +453,6 @@ def __init__( if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels self.out_channels = out_channels From a47f609f904842a12c54c465fc93bda38257e289 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 2 Jan 2024 01:50:57 -0500 Subject: [PATCH 362/420] Auto detect out_channels from model. --- comfy/model_detection.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index e3af422a310..ad16c0fbf79 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -34,7 +34,6 @@ def detect_unet_config(state_dict, key_prefix, dtype): unet_config = { "use_checkpoint": False, "image_size": 32, - "out_channels": 4, "use_spatial_transformer": True, "legacy": False } @@ -49,6 +48,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): unet_config["dtype"] = dtype model_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[0] in_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[1] + out_channels = state_dict['{}out.2.weight'.format(key_prefix)].shape[0] num_res_blocks = [] channel_mult = [] @@ -122,6 +122,7 @@ def detect_unet_config(state_dict, key_prefix, dtype): transformer_depth_middle = -1 unet_config["in_channels"] = in_channels + unet_config["out_channels"] = out_channels unet_config["model_channels"] = model_channels unet_config["num_res_blocks"] = num_res_blocks unet_config["transformer_depth"] = transformer_depth From 8e2c99e3cf3b85390ff9aa47edb7cbd319dfdc3b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 2 Jan 2024 11:50:00 -0500 Subject: [PATCH 363/420] Fix issue when websocket is deleted when data is being sent. --- server.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/server.py b/server.py index 9b1e3269d7f..bd6f026b279 100644 --- a/server.py +++ b/server.py @@ -584,7 +584,8 @@ async def send_bytes(self, event, data, sid=None): message = self.encode_bytes(event, data) if sid is None: - for ws in self.sockets.values(): + sockets = list(self.sockets.values()) + for ws in sockets: await send_socket_catch_exception(ws.send_bytes, message) elif sid in self.sockets: await send_socket_catch_exception(self.sockets[sid].send_bytes, message) @@ -593,7 +594,8 @@ async def send_json(self, event, data, sid=None): message = {"type": event, "data": data} if sid is None: - for ws in self.sockets.values(): + sockets = list(self.sockets.values()) + for ws in sockets: await send_socket_catch_exception(ws.send_json, message) elif sid in self.sockets: await send_socket_catch_exception(self.sockets[sid].send_json, message) From 5eddfdd80caae18305cde55624c1b932a3e4a360 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 2 Jan 2024 13:24:34 -0500 Subject: [PATCH 364/420] Refactor VAE code. Replace constants with downscale_ratio and latent_channels. 
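Making the ratio and channel count attributes instead of literals lets VAEs with other geometries (such as the x4 upscaler's, added two patches later with a downscale ratio of 4) reuse the same encode/decode paths. A sketch of the pixel/latent shape relationship these attributes encode, using the SD1.x/SD2.x defaults:

    import torch

    downscale_ratio = 8   # was the hard-coded 8
    latent_channels = 4   # was the hard-coded 4

    pixels = torch.zeros(1, 3, 512, 512)
    latent = torch.zeros(pixels.shape[0], latent_channels,
                         pixels.shape[2] // downscale_ratio,
                         pixels.shape[3] // downscale_ratio)
    print(latent.shape)  # torch.Size([1, 4, 64, 64])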
--- comfy/sd.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index 220637a05d7..10a6715a800 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -157,6 +157,8 @@ def __init__(self, sd=None, device=None, config=None, dtype=None): self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower) self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) + self.downscale_ratio = 8 + self.latent_channels = 4 if config is None: if "decoder.mid.block_1.mix_factor" in sd: @@ -204,9 +206,9 @@ def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16): decode_fn = lambda a: (self.first_stage_model.decode(a.to(self.vae_dtype).to(self.device)) + 1.0).float() output = torch.clamp(( - (comfy.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) + - comfy.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar) + - comfy.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = 8, output_device=self.output_device, pbar = pbar)) + (comfy.utils.tiled_scale(samples, decode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) + + comfy.utils.tiled_scale(samples, decode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar) + + comfy.utils.tiled_scale(samples, decode_fn, tile_x, tile_y, overlap, upscale_amount = self.downscale_ratio, output_device=self.output_device, pbar = pbar)) / 3.0) / 2.0, min=0.0, max=1.0) return output @@ -217,9 +219,9 @@ def encode_tiled_(self, pixel_samples, tile_x=512, tile_y=512, overlap = 64): pbar = comfy.utils.ProgressBar(steps) encode_fn = lambda a: self.first_stage_model.encode((2. 
* a - 1.).to(self.vae_dtype).to(self.device)).float() - samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) - samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) - samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/8), out_channels=4, output_device=self.output_device, pbar=pbar) + samples = comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x, tile_y, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar) + samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x * 2, tile_y // 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar) + samples += comfy.utils.tiled_scale(pixel_samples, encode_fn, tile_x // 2, tile_y * 2, overlap, upscale_amount = (1/self.downscale_ratio), out_channels=self.latent_channels, output_device=self.output_device, pbar=pbar) samples /= 3.0 return samples @@ -231,7 +233,7 @@ def decode(self, samples_in): batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) - pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * 8), round(samples_in.shape[3] * 8)), device=self.output_device) + pixel_samples = torch.empty((samples_in.shape[0], 3, round(samples_in.shape[2] * self.downscale_ratio), round(samples_in.shape[3] * self.downscale_ratio)), device=self.output_device) for x in range(0, samples_in.shape[0], batch_number): samples = samples_in[x:x+batch_number].to(self.vae_dtype).to(self.device) pixel_samples[x:x+batch_number] = torch.clamp((self.first_stage_model.decode(samples).to(self.output_device).float() + 1.0) / 2.0, min=0.0, max=1.0) @@ -255,7 +257,7 @@ def encode(self, pixel_samples): free_memory = model_management.get_free_memory(self.device) batch_number = int(free_memory / memory_used) batch_number = max(1, batch_number) - samples = torch.empty((pixel_samples.shape[0], 4, round(pixel_samples.shape[2] // 8), round(pixel_samples.shape[3] // 8)), device=self.output_device) + samples = torch.empty((pixel_samples.shape[0], self.latent_channels, round(pixel_samples.shape[2] // self.downscale_ratio), round(pixel_samples.shape[3] // self.downscale_ratio)), device=self.output_device) for x in range(0, pixel_samples.shape[0], batch_number): pixels_in = (2. * pixel_samples[x:x+batch_number] - 1.).to(self.vae_dtype).to(self.device) samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float() From 2c4e92a98b8338f754855a0db7dce164945e366e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 2 Jan 2024 14:41:33 -0500 Subject: [PATCH 365/420] Fix regression. 
--- comfy/model_detection.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/comfy/model_detection.py b/comfy/model_detection.py index ad16c0fbf79..ea824c44ca1 100644 --- a/comfy/model_detection.py +++ b/comfy/model_detection.py @@ -48,7 +48,12 @@ def detect_unet_config(state_dict, key_prefix, dtype): unet_config["dtype"] = dtype model_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[0] in_channels = state_dict['{}input_blocks.0.0.weight'.format(key_prefix)].shape[1] - out_channels = state_dict['{}out.2.weight'.format(key_prefix)].shape[0] + + out_key = '{}out.2.weight'.format(key_prefix) + if out_key in state_dict: + out_channels = state_dict[out_key].shape[0] + else: + out_channels = 4 num_res_blocks = [] channel_mult = [] From a7874d1a8b88f9e5cc3d37fdba9b763004b6357d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 3 Jan 2024 03:30:39 -0500 Subject: [PATCH 366/420] Add support for the stable diffusion x4 upscaling model. This is an old model. Load the checkpoint like a regular one and use the new SD_4XUpscale_Conditioning node. --- comfy/latent_formats.py | 4 +++ comfy/model_base.py | 21 +++++++++++++++ comfy/sd.py | 5 ++++ comfy/supported_models.py | 28 +++++++++++++++++++- comfy_extras/nodes_sdupscale.py | 45 +++++++++++++++++++++++++++++++++ nodes.py | 1 + 6 files changed, 103 insertions(+), 1 deletion(-) create mode 100644 comfy_extras/nodes_sdupscale.py diff --git a/comfy/latent_formats.py b/comfy/latent_formats.py index c209087e0cc..2252a075ed5 100644 --- a/comfy/latent_formats.py +++ b/comfy/latent_formats.py @@ -33,3 +33,7 @@ def __init__(self): [-0.3112, -0.2359, -0.2076] ] self.taesd_decoder_name = "taesdxl_decoder" + +class SD_X4(LatentFormat): + def __init__(self): + self.scale_factor = 0.08333 diff --git a/comfy/model_base.py b/comfy/model_base.py index b3a1fcd51f0..64a380ff355 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -364,3 +364,24 @@ def extra_conds(self, **kwargs): cross_attn = self.cc_projection(cross_attn) out['c_crossattn'] = comfy.conds.CONDCrossAttn(cross_attn) return out + +class SD_X4Upscaler(BaseModel): + def __init__(self, model_config, model_type=ModelType.V_PREDICTION, device=None): + super().__init__(model_config, model_type, device=device) + + def extra_conds(self, **kwargs): + out = {} + + image = kwargs.get("concat_image", None) + noise = kwargs.get("noise", None) + + if image is None: + image = torch.zeros_like(noise)[:,:3] + + if image.shape[1:] != noise.shape[1:]: + image = utils.common_upscale(image, noise.shape[-1], noise.shape[-2], "bilinear", "center") + + image = utils.resize_to_batch_size(image, noise.shape[0]) + + out['c_concat'] = comfy.conds.CONDNoiseShape(image) + return out diff --git a/comfy/sd.py b/comfy/sd.py index 10a6715a800..1ff25bec630 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -174,6 +174,11 @@ def __init__(self, sd=None, device=None, config=None, dtype=None): else: #default SD1.x/SD2.x VAE parameters ddconfig = {'double_z': True, 'z_channels': 4, 'resolution': 256, 'in_channels': 3, 'out_ch': 3, 'ch': 128, 'ch_mult': [1, 2, 4, 4], 'num_res_blocks': 2, 'attn_resolutions': [], 'dropout': 0.0} + + if 'encoder.down.2.downsample.conv.weight' not in sd: #Stable diffusion x4 upscaler VAE + ddconfig['ch_mult'] = [1, 2, 4] + self.downscale_ratio = 4 + self.first_stage_model = AutoencoderKL(ddconfig=ddconfig, embed_dim=4) else: self.first_stage_model = AutoencoderKL(**(config['params'])) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 
251bf6ace86..e7a6cc17918 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -278,6 +278,32 @@ def get_model(self, state_dict, prefix="", device=None): def clip_target(self): return None +class SD_X4Upscaler(SD20): + unet_config = { + "context_dim": 1024, + "model_channels": 256, + 'in_channels': 7, + "use_linear_in_transformer": True, + "adm_in_channels": None, + "use_temporal_attention": False, + } + + unet_extra_config = { + "disable_self_attentions": [True, True, True, False], + "num_heads": 8, + "num_head_channels": -1, + } + + latent_format = latent_formats.SD_X4 + + sampling_settings = { + "linear_start": 0.0001, + "linear_end": 0.02, + } + + def get_model(self, state_dict, prefix="", device=None): + out = model_base.SD_X4Upscaler(self, device=device) + return out -models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega] +models = [Stable_Zero123, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXLRefiner, SDXL, SSD1B, Segmind_Vega, SD_X4Upscaler] models += [SVD_img2vid] diff --git a/comfy_extras/nodes_sdupscale.py b/comfy_extras/nodes_sdupscale.py new file mode 100644 index 00000000000..38a027e0b7f --- /dev/null +++ b/comfy_extras/nodes_sdupscale.py @@ -0,0 +1,45 @@ +import torch +import nodes +import comfy.utils + +class SD_4XUpscale_Conditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": { "images": ("IMAGE",), + "positive": ("CONDITIONING",), + "negative": ("CONDITIONING",), + "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}), + # "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}), #TODO + }} + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "encode" + + CATEGORY = "conditioning/upscale_diffusion" + + def encode(self, images, positive, negative, scale_ratio): + width = max(1, round(images.shape[-2] * scale_ratio)) + height = max(1, round(images.shape[-3] * scale_ratio)) + + pixels = comfy.utils.common_upscale((images.movedim(-1,1) * 2.0) - 1.0, width // 4, height // 4, "bilinear", "center") + + out_cp = [] + out_cn = [] + + for t in positive: + n = [t[0], t[1].copy()] + n[1]['concat_image'] = pixels + out_cp.append(n) + + for t in negative: + n = [t[0], t[1].copy()] + n[1]['concat_image'] = pixels + out_cn.append(n) + + latent = torch.zeros([images.shape[0], 4, height // 4, width // 4]) + return (out_cp, out_cn, {"samples":latent}) + +NODE_CLASS_MAPPINGS = { + "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning, +} diff --git a/nodes.py b/nodes.py index 8e3ec947cd4..82244cf76fa 100644 --- a/nodes.py +++ b/nodes.py @@ -1880,6 +1880,7 @@ def init_custom_nodes(): "nodes_sag.py", "nodes_perpneg.py", "nodes_stable3d.py", + "nodes_sdupscale.py", ] for node_file in extras_files: From ef4f6037cbbbd4150c44862eb398428b70f19263 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 3 Jan 2024 12:16:30 -0500 Subject: [PATCH 367/420] Fix model patches not working in custom sampling scheduler nodes. 
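Scheduler nodes need the model's object patches applied (a model_sampling override changes the sigmas, for example) but have no reason to pay for weight patching, so patch_model() gains a patch_weights flag. The calling pattern the nodes below switch to, sketched as a helper (model is assumed to be a ModelPatcher):

    def scheduler_sigmas(model, timesteps):
        # apply object patches only; weights are left untouched
        inner_model = model.patch_model(patch_weights=False)
        sigmas = inner_model.model_sampling.sigma(timesteps)
        model.unpatch_model()  # restore the original attributes
        return sigmas

Note that BasicScheduler is reworked again four patches later (371) to load the model with load_models_gpu instead, which also fixes an interaction with loras.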
--- comfy/model_patcher.py | 47 ++++++++++++++-------------- comfy_extras/nodes_custom_sampler.py | 8 +++-- 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py index b1b5ea6a811..a88b737cca3 100644 --- a/comfy/model_patcher.py +++ b/comfy/model_patcher.py @@ -174,40 +174,41 @@ def model_state_dict(self, filter_prefix=None): sd.pop(k) return sd - def patch_model(self, device_to=None): + def patch_model(self, device_to=None, patch_weights=True): for k in self.object_patches: old = getattr(self.model, k) if k not in self.object_patches_backup: self.object_patches_backup[k] = old setattr(self.model, k, self.object_patches[k]) - model_sd = self.model_state_dict() - for key in self.patches: - if key not in model_sd: - print("could not patch. key doesn't exist in model:", key) - continue + if patch_weights: + model_sd = self.model_state_dict() + for key in self.patches: + if key not in model_sd: + print("could not patch. key doesn't exist in model:", key) + continue - weight = model_sd[key] + weight = model_sd[key] - inplace_update = self.weight_inplace_update + inplace_update = self.weight_inplace_update - if key not in self.backup: - self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update) + if key not in self.backup: + self.backup[key] = weight.to(device=self.offload_device, copy=inplace_update) - if device_to is not None: - temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) - else: - temp_weight = weight.to(torch.float32, copy=True) - out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype) - if inplace_update: - comfy.utils.copy_to_param(self.model, key, out_weight) - else: - comfy.utils.set_attr(self.model, key, out_weight) - del temp_weight + if device_to is not None: + temp_weight = comfy.model_management.cast_to_device(weight, device_to, torch.float32, copy=True) + else: + temp_weight = weight.to(torch.float32, copy=True) + out_weight = self.calculate_weight(self.patches[key], temp_weight, key).to(weight.dtype) + if inplace_update: + comfy.utils.copy_to_param(self.model, key, out_weight) + else: + comfy.utils.set_attr(self.model, key, out_weight) + del temp_weight - if device_to is not None: - self.model.to(device_to) - self.current_device = device_to + if device_to is not None: + self.model.to(device_to) + self.current_device = device_to return self.model diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index d5f9ba0070a..bb0ed57b256 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -26,7 +26,9 @@ def get_sigmas(self, model, scheduler, steps, denoise): if denoise < 1.0: total_steps = int(steps/denoise) - sigmas = comfy.samplers.calculate_sigmas_scheduler(model.model, scheduler, total_steps).cpu() + inner_model = model.patch_model(patch_weights=False) + sigmas = comfy.samplers.calculate_sigmas_scheduler(inner_model, scheduler, total_steps).cpu() + model.unpatch_model() sigmas = sigmas[-(steps + 1):] return (sigmas, ) @@ -104,7 +106,9 @@ def INPUT_TYPES(s): def get_sigmas(self, model, steps, denoise): start_step = 10 - int(10 * denoise) timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps] - sigmas = model.model.model_sampling.sigma(timesteps) + inner_model = model.patch_model(patch_weights=False) + sigmas = inner_model.model_sampling.sigma(timesteps) + model.unpatch_model() sigmas = torch.cat([sigmas, 
sigmas.new_zeros([1])]) return (sigmas, ) From 8c6493578b3dda233e9b9a953feeaf1e6ca434ad Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 3 Jan 2024 14:27:11 -0500 Subject: [PATCH 368/420] Implement noise augmentation for SD 4X upscale model. --- .../modules/diffusionmodules/openaimodel.py | 2 +- .../ldm/modules/diffusionmodules/upscaling.py | 12 ++++++---- comfy/model_base.py | 22 ++++++++++++++----- comfy/samplers.py | 4 ++-- comfy/supported_models.py | 1 + comfy_extras/nodes_sdupscale.py | 6 +++-- 6 files changed, 33 insertions(+), 14 deletions(-) diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index cb0a7983515..ea936e06623 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -498,7 +498,7 @@ def __init__( if self.num_classes is not None: if isinstance(self.num_classes, int): - self.label_emb = nn.Embedding(num_classes, time_embed_dim) + self.label_emb = nn.Embedding(num_classes, time_embed_dim, dtype=self.dtype, device=device) elif self.num_classes == "continuous": print("setting up linear c_adm embedding layer") self.label_emb = nn.Linear(1, time_embed_dim) diff --git a/comfy/ldm/modules/diffusionmodules/upscaling.py b/comfy/ldm/modules/diffusionmodules/upscaling.py index 768a47f9c29..f5ac7c2f913 100644 --- a/comfy/ldm/modules/diffusionmodules/upscaling.py +++ b/comfy/ldm/modules/diffusionmodules/upscaling.py @@ -41,8 +41,12 @@ def register_schedule(self, beta_schedule="linear", timesteps=1000, self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) + def q_sample(self, x_start, t, noise=None, seed=None): + if noise is None: + if seed is None: + noise = torch.randn_like(x_start) + else: + noise = torch.randn(x_start.size(), dtype=x_start.dtype, layout=x_start.layout, generator=torch.manual_seed(seed)).to(x_start.device) return (extract_into_tensor(self.sqrt_alphas_cumprod.to(x_start.device), t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod.to(x_start.device), t, x_start.shape) * noise) @@ -69,12 +73,12 @@ def __init__(self, noise_schedule_config, max_noise_level=1000, to_cuda=False): super().__init__(noise_schedule_config=noise_schedule_config) self.max_noise_level = max_noise_level - def forward(self, x, noise_level=None): + def forward(self, x, noise_level=None, seed=None): if noise_level is None: noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long() else: assert isinstance(noise_level, torch.Tensor) - z = self.q_sample(x, noise_level) + z = self.q_sample(x, noise_level, seed=seed) return z, noise_level diff --git a/comfy/model_base.py b/comfy/model_base.py index 64a380ff355..f59526204cb 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -1,7 +1,7 @@ import torch -from comfy.ldm.modules.diffusionmodules.openaimodel import UNetModel +from comfy.ldm.modules.diffusionmodules.openaimodel import UNetModel, Timestep from comfy.ldm.modules.encoders.noise_aug_modules import CLIPEmbeddingNoiseAugmentation -from comfy.ldm.modules.diffusionmodules.openaimodel import Timestep +from comfy.ldm.modules.diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation import comfy.model_management import comfy.conds import comfy.ops @@ -78,8 
+78,9 @@ def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, trans extra_conds = {} for o in kwargs: extra = kwargs[o] - if hasattr(extra, "to"): - extra = extra.to(dtype) + if hasattr(extra, "dtype"): + if extra.dtype != torch.int and extra.dtype != torch.long: + extra = extra.to(dtype) extra_conds[o] = extra model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() @@ -368,20 +369,31 @@ def extra_conds(self, **kwargs): class SD_X4Upscaler(BaseModel): def __init__(self, model_config, model_type=ModelType.V_PREDICTION, device=None): super().__init__(model_config, model_type, device=device) + self.noise_augmentor = ImageConcatWithNoiseAugmentation(noise_schedule_config={"linear_start": 0.0001, "linear_end": 0.02}, max_noise_level=350) def extra_conds(self, **kwargs): out = {} image = kwargs.get("concat_image", None) noise = kwargs.get("noise", None) + noise_augment = kwargs.get("noise_augmentation", 0.0) + device = kwargs["device"] + seed = kwargs["seed"] - 10 + + noise_level = round((self.noise_augmentor.max_noise_level) * noise_augment) if image is None: image = torch.zeros_like(noise)[:,:3] if image.shape[1:] != noise.shape[1:]: - image = utils.common_upscale(image, noise.shape[-1], noise.shape[-2], "bilinear", "center") + image = utils.common_upscale(image.to(device), noise.shape[-1], noise.shape[-2], "bilinear", "center") + + noise_level = torch.tensor([noise_level], device=device) + if noise_augment > 0: + image, noise_level = self.noise_augmentor(image.to(device), noise_level=noise_level, seed=seed) image = utils.resize_to_batch_size(image, noise.shape[0]) out['c_concat'] = comfy.conds.CONDNoiseShape(image) + out['y'] = comfy.conds.CONDRegular(noise_level) return out diff --git a/comfy/samplers.py b/comfy/samplers.py index 0453c1f6fda..89d8d4f2821 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -603,8 +603,8 @@ def sample(model, noise, positive, negative, cfg, device, sampler, sigmas, model latent_image = model.process_latent_in(latent_image) if hasattr(model, 'extra_conds'): - positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask) - negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask) + positive = encode_model_conds(model.extra_conds, positive, noise, device, "positive", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed) + negative = encode_model_conds(model.extra_conds, negative, noise, device, "negative", latent_image=latent_image, denoise_mask=denoise_mask, seed=seed) #make sure each cond area has an opposite one with the same area for c in positive: diff --git a/comfy/supported_models.py b/comfy/supported_models.py index e7a6cc17918..1d442d4dd9c 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -290,6 +290,7 @@ class SD_X4Upscaler(SD20): unet_extra_config = { "disable_self_attentions": [True, True, True, False], + "num_classes": 1000, "num_heads": 8, "num_head_channels": -1, } diff --git a/comfy_extras/nodes_sdupscale.py b/comfy_extras/nodes_sdupscale.py index 38a027e0b7f..28c1cb0f171 100644 --- a/comfy_extras/nodes_sdupscale.py +++ b/comfy_extras/nodes_sdupscale.py @@ -9,7 +9,7 @@ def INPUT_TYPES(s): "positive": ("CONDITIONING",), "negative": ("CONDITIONING",), "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}), - # 
"noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.01}), #TODO + "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), }} RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") RETURN_NAMES = ("positive", "negative", "latent") @@ -18,7 +18,7 @@ def INPUT_TYPES(s): CATEGORY = "conditioning/upscale_diffusion" - def encode(self, images, positive, negative, scale_ratio): + def encode(self, images, positive, negative, scale_ratio, noise_augmentation): width = max(1, round(images.shape[-2] * scale_ratio)) height = max(1, round(images.shape[-3] * scale_ratio)) @@ -30,11 +30,13 @@ def encode(self, images, positive, negative, scale_ratio): for t in positive: n = [t[0], t[1].copy()] n[1]['concat_image'] = pixels + n[1]['noise_augmentation'] = noise_augmentation out_cp.append(n) for t in negative: n = [t[0], t[1].copy()] n[1]['concat_image'] = pixels + n[1]['noise_augmentation'] = noise_augmentation out_cn.append(n) latent = torch.zeros([images.shape[0], 4, height // 4, width // 4]) From 6d281b4ff4ad3918a4f3b4ca4a8b547a2ba3bf80 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 4 Jan 2024 14:28:11 -0500 Subject: [PATCH 369/420] Add a /free route to unload models or free all memory. A POST request to /free with: {"unload_models":true} will unload models from vram. A POST request to /free with: {"free_memory":true} will unload models and free all cached data from the last run workflow. --- execution.py | 20 +++++++++++++++++++- main.py | 15 ++++++++++++++- server.py | 11 +++++++++++ 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/execution.py b/execution.py index 53ba2e0f8a3..260a08970c0 100644 --- a/execution.py +++ b/execution.py @@ -268,11 +268,14 @@ def recursive_output_delete_if_changed(prompt, old_prompt, outputs, current_item class PromptExecutor: def __init__(self, server): + self.server = server + self.reset() + + def reset(self): self.outputs = {} self.object_storage = {} self.outputs_ui = {} self.old_prompt = {} - self.server = server def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, error, ex): node_id = error["node_id"] @@ -706,6 +709,7 @@ def __init__(self, server): self.queue = [] self.currently_running = {} self.history = {} + self.flags = {} server.prompt_queue = self def put(self, item): @@ -792,3 +796,17 @@ def wipe_history(self): def delete_history_item(self, id_to_delete): with self.mutex: self.history.pop(id_to_delete, None) + + def set_flag(self, name, data): + with self.mutex: + self.flags[name] = data + self.not_empty.notify() + + def get_flags(self, reset=True): + with self.mutex: + if reset: + ret = self.flags + self.flags = {} + return ret + else: + return self.flags.copy() diff --git a/main.py b/main.py index bcf73872980..45d5d41b3b8 100644 --- a/main.py +++ b/main.py @@ -97,7 +97,7 @@ def prompt_worker(q, server): gc_collect_interval = 10.0 while True: - timeout = None + timeout = 1000.0 if need_gc: timeout = max(gc_collect_interval - (current_time - last_gc_collect), 0.0) @@ -118,6 +118,19 @@ def prompt_worker(q, server): execution_time = current_time - execution_start_time print("Prompt executed in {:.2f} seconds".format(execution_time)) + flags = q.get_flags() + free_memory = flags.get("free_memory", False) + + if flags.get("unload_models", free_memory): + comfy.model_management.unload_all_models() + need_gc = True + last_gc_collect = 0 + + if free_memory: + e.reset() + need_gc = True + last_gc_collect = 0 + if need_gc: current_time = 
time.perf_counter() if (current_time - last_gc_collect) > gc_collect_interval: diff --git a/server.py b/server.py index bd6f026b279..acb8875d21e 100644 --- a/server.py +++ b/server.py @@ -507,6 +507,17 @@ async def post_interrupt(request): nodes.interrupt_processing() return web.Response(status=200) + @routes.post("/free") + async def post_interrupt(request): + json_data = await request.json() + unload_models = json_data.get("unload_models", False) + free_memory = json_data.get("free_memory", False) + if unload_models: + self.prompt_queue.set_flag("unload_models", unload_models) + if free_memory: + self.prompt_queue.set_flag("free_memory", free_memory) + return web.Response(status=200) + @routes.post("/history") async def post_history(request): json_data = await request.json() From 35322a376630e087d65f3108b0a6ec7a4b39279b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 5 Jan 2024 04:20:03 -0500 Subject: [PATCH 370/420] StableZero123_Conditioning_Batched node. This node lets you generate a batch of images with different elevations or azimuths by setting the elevation_batch_increment and/or azimuth_batch_increment. It also sets the batch index for the latents so that the same init noise is used on each frame. --- comfy_extras/nodes_stable3d.py | 44 ++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/comfy_extras/nodes_stable3d.py b/comfy_extras/nodes_stable3d.py index c6791d8de2a..e02a9875a97 100644 --- a/comfy_extras/nodes_stable3d.py +++ b/comfy_extras/nodes_stable3d.py @@ -53,6 +53,50 @@ def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevat latent = torch.zeros([batch_size, 4, height // 8, width // 8]) return (positive, negative, {"samples":latent}) +class StableZero123_Conditioning_Batched: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 256, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "elevation": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + "azimuth": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + "elevation_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + "azimuth_batch_increment": ("FLOAT", {"default": 0.0, "min": -180.0, "max": 180.0}), + }} + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + + FUNCTION = "encode" + + CATEGORY = "conditioning/3d_models" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevation, azimuth, elevation_batch_increment, azimuth_batch_increment): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + cam_embeds = [] + for i in range(batch_size): + cam_embeds.append(camera_embeddings(elevation, azimuth)) + elevation += elevation_batch_increment + azimuth += azimuth_batch_increment + + cam_embeds = torch.cat(cam_embeds, dim=0) + cond = torch.cat([comfy.utils.repeat_to_batch_size(pooled, batch_size), cam_embeds], dim=-1) + + positive = [[cond, {"concat_latent_image": t}]] + negative = [[torch.zeros_like(pooled), {"concat_latent_image": 
torch.zeros_like(t)}]] + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (positive, negative, {"samples":latent, "batch_index": [0] * batch_size}) + + NODE_CLASS_MAPPINGS = { "StableZero123_Conditioning": StableZero123_Conditioning, + "StableZero123_Conditioning_Batched": StableZero123_Conditioning_Batched, } From 7c9a0f7e0ad212719cd2c6179ae6266d949ea401 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 5 Jan 2024 12:31:13 -0500 Subject: [PATCH 371/420] Fix BasicScheduler issue with Loras. --- comfy_extras/nodes_custom_sampler.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index bb0ed57b256..f02cb5ef788 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -26,9 +26,8 @@ def get_sigmas(self, model, scheduler, steps, denoise): if denoise < 1.0: total_steps = int(steps/denoise) - inner_model = model.patch_model(patch_weights=False) - sigmas = comfy.samplers.calculate_sigmas_scheduler(inner_model, scheduler, total_steps).cpu() - model.unpatch_model() + comfy.model_management.load_models_gpu([model]) + sigmas = comfy.samplers.calculate_sigmas_scheduler(model.model, scheduler, total_steps).cpu() sigmas = sigmas[-(steps + 1):] return (sigmas, ) From af94eb14e3b44f6e6c48a7762a5e229cf3a006e4 Mon Sep 17 00:00:00 2001 From: ramyma Date: Sat, 6 Jan 2024 04:27:09 +0200 Subject: [PATCH 372/420] fix: `/free` handler function name --- server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.py b/server.py index acb8875d21e..5b821dcbf9f 100644 --- a/server.py +++ b/server.py @@ -508,7 +508,7 @@ async def post_interrupt(request): return web.Response(status=200) @routes.post("/free") - async def post_interrupt(request): + async def post_free(request): json_data = await request.json() unload_models = json_data.get("unload_models", False) free_memory = json_data.get("free_memory", False) From 3ad0191bfb7674486734db98769ab466f27e9362 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 6 Jan 2024 04:33:03 -0500 Subject: [PATCH 373/420] Implement attention mask on xformers. --- comfy/ldm/modules/attention.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 3e12886b07f..14d41a8cd9e 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -294,11 +294,14 @@ def attention_xformers(q, k, v, heads, mask=None): (q, k, v), ) - # actually compute the attention, what we cannot get enough of - out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) + if mask is not None: + pad = 8 - q.shape[1] % 8 + mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device) + mask_out[:, :, :mask.shape[-1]] = mask + mask = mask_out[:, :, :mask.shape[-1]] + + out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask) - if exists(mask): - raise NotImplementedError out = ( out.unsqueeze(0) .reshape(b, heads, -1, dim_head) From 0c2c9fbdfa53c2ad3b7658a7f2300da831830388 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 6 Jan 2024 13:16:48 -0500 Subject: [PATCH 374/420] Support attention mask in split attention. 
--- comfy/ldm/modules/attention.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 14d41a8cd9e..a18a6929455 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -239,6 +239,12 @@ def attention_split(q, k, v, heads, mask=None): else: s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale + if mask is not None: + if len(mask.shape) == 2: + s1 += mask[i:end] + else: + s1 += mask[:, i:end] + s2 = s1.softmax(dim=-1).to(v.dtype) del s1 first_op_done = True From aaa9017302b75cf4453b5f8a58788e121f8e0a39 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 7 Jan 2024 04:13:58 -0500 Subject: [PATCH 375/420] Add attention mask support to sub quad attention. --- comfy/ldm/modules/attention.py | 1 + comfy/ldm/modules/sub_quadratic_attention.py | 30 +++++++++++++++++--- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index a18a6929455..8015a307a0b 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -177,6 +177,7 @@ def attention_sub_quad(query, key, value, heads, mask=None): kv_chunk_size_min=kv_chunk_size_min, use_checkpoint=False, upcast_attention=upcast_attention, + mask=mask, ) hidden_states = hidden_states.to(dtype) diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py index 8e8e8054dfd..cb0896b0df5 100644 --- a/comfy/ldm/modules/sub_quadratic_attention.py +++ b/comfy/ldm/modules/sub_quadratic_attention.py @@ -61,6 +61,7 @@ def _summarize_chunk( value: Tensor, scale: float, upcast_attention: bool, + mask, ) -> AttnChunk: if upcast_attention: with torch.autocast(enabled=False, device_type = 'cuda'): @@ -84,6 +85,8 @@ def _summarize_chunk( max_score, _ = torch.max(attn_weights, -1, keepdim=True) max_score = max_score.detach() attn_weights -= max_score + if mask is not None: + attn_weights += mask torch.exp(attn_weights, out=attn_weights) exp_weights = attn_weights.to(value.dtype) exp_values = torch.bmm(exp_weights, value) @@ -96,11 +99,12 @@ def _query_chunk_attention( value: Tensor, summarize_chunk: SummarizeChunk, kv_chunk_size: int, + mask, ) -> Tensor: batch_x_heads, k_channels_per_head, k_tokens = key_t.shape _, _, v_channels_per_head = value.shape - def chunk_scanner(chunk_idx: int) -> AttnChunk: + def chunk_scanner(chunk_idx: int, mask) -> AttnChunk: key_chunk = dynamic_slice( key_t, (0, 0, chunk_idx), @@ -111,10 +115,13 @@ def chunk_scanner(chunk_idx: int) -> AttnChunk: (0, chunk_idx, 0), (batch_x_heads, kv_chunk_size, v_channels_per_head) ) - return summarize_chunk(query, key_chunk, value_chunk) + if mask is not None: + mask = mask[:,:,chunk_idx:chunk_idx + kv_chunk_size] + + return summarize_chunk(query, key_chunk, value_chunk, mask=mask) chunks: List[AttnChunk] = [ - chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size) + chunk_scanner(chunk, mask) for chunk in torch.arange(0, k_tokens, kv_chunk_size) ] acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks))) chunk_values, chunk_weights, chunk_max = acc_chunk @@ -135,6 +142,7 @@ def _get_attention_scores_no_kv_chunking( value: Tensor, scale: float, upcast_attention: bool, + mask, ) -> Tensor: if upcast_attention: with torch.autocast(enabled=False, device_type = 'cuda'): @@ -156,6 +164,8 @@ def _get_attention_scores_no_kv_chunking( beta=0, ) + if mask is not None: + attn_scores += mask try: attn_probs = attn_scores.softmax(dim=-1) del attn_scores 
@@ -183,6 +193,7 @@ def efficient_dot_product_attention( kv_chunk_size_min: Optional[int] = None, use_checkpoint=True, upcast_attention=False, + mask = None, ): """Computes efficient dot-product attention given query, transposed key, and value. This is efficient version of attention presented in @@ -209,13 +220,22 @@ def efficient_dot_product_attention( if kv_chunk_size_min is not None: kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min) + if mask is not None and len(mask.shape) == 2: + mask = mask.unsqueeze(0) + def get_query_chunk(chunk_idx: int) -> Tensor: return dynamic_slice( query, (0, chunk_idx, 0), (batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head) ) - + + def get_mask_chunk(chunk_idx: int) -> Tensor: + if mask is None: + return None + chunk = min(query_chunk_size, q_tokens) + return mask[:,chunk_idx:chunk_idx + chunk] + summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale, upcast_attention=upcast_attention) summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk compute_query_chunk_attn: ComputeQueryChunkAttn = partial( @@ -237,6 +257,7 @@ def get_query_chunk(chunk_idx: int) -> Tensor: query=query, key_t=key_t, value=value, + mask=mask, ) # TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance, @@ -246,6 +267,7 @@ def get_query_chunk(chunk_idx: int) -> Tensor: query=get_query_chunk(i * query_chunk_size), key_t=key_t, value=value, + mask=get_mask_chunk(i * query_chunk_size) ) for i in range(math.ceil(q_tokens / query_chunk_size)) ], dim=1) return res From c6951548cfec64c28082e6560c69c59e32729c9c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 7 Jan 2024 13:52:08 -0500 Subject: [PATCH 376/420] Update optimized_attention_for_device function for new functions that support masked attention. 
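After this change the selector takes a small_input hint alongside the mask flag, so short-sequence callers such as the CLIP text model (see the clip_model.py hunk below) can prefer the PyTorch kernel while UNet-sized inputs keep the globally chosen implementation. A hedged sketch of the resulting calling convention — the tensor shapes are illustrative, and the import path is assumed from the file layout:

    import torch
    from comfy.ldm.modules.attention import optimized_attention_for_device

    q = k = v = torch.randn(2, 77, 512)   # [batch, tokens, heads * dim_head]
    mask = torch.zeros(77, 77)            # additive bias; 0 means unmasked

    attn = optimized_attention_for_device(q.device, mask=True, small_input=True)
    out = attn(q, k, v, heads=8, mask=mask)  # same [batch, tokens, channels] layout back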
--- comfy/clip_model.py | 2 +- comfy/ldm/modules/attention.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/comfy/clip_model.py b/comfy/clip_model.py index 7397b7a2637..09e7bbca152 100644 --- a/comfy/clip_model.py +++ b/comfy/clip_model.py @@ -57,7 +57,7 @@ def __init__(self, num_layers, embed_dim, heads, intermediate_size, intermediate self.layers = torch.nn.ModuleList([CLIPLayer(embed_dim, heads, intermediate_size, intermediate_activation, dtype, device, operations) for i in range(num_layers)]) def forward(self, x, mask=None, intermediate_output=None): - optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None) + optimized_attention = optimized_attention_for_device(x.device, mask=mask is not None, small_input=True) if intermediate_output is not None: if intermediate_output < 0: diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 8015a307a0b..309240d5cc7 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -333,7 +333,6 @@ def attention_pytorch(q, k, v, heads, mask=None): optimized_attention = attention_basic -optimized_attention_masked = attention_basic if model_management.xformers_enabled(): print("Using xformers cross attention") @@ -349,15 +348,15 @@ def attention_pytorch(q, k, v, heads, mask=None): print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") optimized_attention = attention_sub_quad -if model_management.pytorch_attention_enabled(): - optimized_attention_masked = attention_pytorch +optimized_attention_masked = optimized_attention + +def optimized_attention_for_device(device, mask=False, small_input=False): + if small_input and model_management.pytorch_attention_enabled(): + return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases + + if device == torch.device("cpu"): + return attention_sub_quad -def optimized_attention_for_device(device, mask=False): - if device == torch.device("cpu"): #TODO - if model_management.pytorch_attention_enabled(): - return attention_pytorch - else: - return attention_basic if mask: return optimized_attention_masked From 6a10640f0dd019dd7c74006909f38d0056c317bd Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 8 Jan 2024 03:46:36 -0500 Subject: [PATCH 377/420] Support properly loading images with mode I. 
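PIL reports 16-bit grayscale PNGs (depth maps, scientific scans) as mode "I", i.e. 32-bit integer pixels in the 0-65535 range; converting such an image straight to "RGB" clamps almost every pixel to white. The fix below rescales the values into the 8-bit range first. A standalone sketch of the same idiom; the file name is hypothetical:

    from PIL import Image

    img = Image.open("depth_16bit.png")      # hypothetical 16-bit grayscale PNG
    if img.mode == "I":
        # map 0..65535 down to roughly 0..257; convert() then clamps to 0..255
        img = img.point(lambda p: p * (1 / 255))
    rgb = img.convert("RGB")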
--- nodes.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nodes.py b/nodes.py index 82244cf76fa..1ea0669b100 100644 --- a/nodes.py +++ b/nodes.py @@ -1415,6 +1415,8 @@ def load_image(self, image): output_masks = [] for i in ImageSequence.Iterator(img): i = ImageOps.exif_transpose(i) + if i.mode == 'I': + i = i.point(lambda i: i * (1 / 255)) image = i.convert("RGB") image = np.array(image).astype(np.float32) / 255.0 image = torch.from_numpy(image)[None,] From 235727fed79880ac2053a1db1b0d13c0f75714e8 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 8 Jan 2024 22:06:44 +0000 Subject: [PATCH 378/420] Store user settings/data on the server and multi user support (#2160) * wip per user data * Rename, hide menu * better error rework default user * store pretty * Add userdata endpoints Change nodetemplates to userdata * add multi user message * make normal arg * Fix tests * Ignore user dir * user tests * Changed to default to browser storage and add server-storage arg * fix crash on empty templates * fix settings added before load * ignore parse errors --- .gitignore | 3 +- app/app_settings.py | 54 +++++ app/user_manager.py | 141 ++++++++++++ comfy/cli_args.py | 6 + folder_paths.py | 1 + server.py | 3 + tests-ui/babel.config.json | 3 +- tests-ui/package-lock.json | 20 ++ tests-ui/package.json | 1 + tests-ui/tests/users.test.js | 295 +++++++++++++++++++++++++ tests-ui/utils/index.js | 16 +- tests-ui/utils/setup.js | 36 +++- web/extensions/core/nodeTemplates.js | 64 ++++-- web/index.html | 30 ++- web/scripts/api.js | 100 +++++++++ web/scripts/app.js | 82 +++++++ web/scripts/ui.js | 269 +---------------------- web/scripts/ui/dialog.js | 32 +++ web/scripts/ui/settings.js | 307 +++++++++++++++++++++++++++ web/scripts/ui/spinner.css | 34 +++ web/scripts/ui/spinner.js | 9 + web/scripts/ui/userSelection.css | 135 ++++++++++++ web/scripts/ui/userSelection.js | 114 ++++++++++ web/scripts/utils.js | 21 ++ web/style.css | 2 + 25 files changed, 1496 insertions(+), 282 deletions(-) create mode 100644 app/app_settings.py create mode 100644 app/user_manager.py create mode 100644 tests-ui/tests/users.test.js create mode 100644 web/scripts/ui/dialog.js create mode 100644 web/scripts/ui/settings.js create mode 100644 web/scripts/ui/spinner.css create mode 100644 web/scripts/ui/spinner.js create mode 100644 web/scripts/ui/userSelection.css create mode 100644 web/scripts/ui/userSelection.js diff --git a/.gitignore b/.gitignore index 43c038e4161..9f0389241ea 100644 --- a/.gitignore +++ b/.gitignore @@ -14,4 +14,5 @@ venv/ /web/extensions/* !/web/extensions/logging.js.example !/web/extensions/core/ -/tests-ui/data/object_info.json \ No newline at end of file +/tests-ui/data/object_info.json +/user/ \ No newline at end of file diff --git a/app/app_settings.py b/app/app_settings.py new file mode 100644 index 00000000000..8c6edc56c1d --- /dev/null +++ b/app/app_settings.py @@ -0,0 +1,54 @@ +import os +import json +from aiohttp import web + + +class AppSettings(): + def __init__(self, user_manager): + self.user_manager = user_manager + + def get_settings(self, request): + file = self.user_manager.get_request_user_filepath( + request, "comfy.settings.json") + if os.path.isfile(file): + with open(file) as f: + return json.load(f) + else: + return {} + + def save_settings(self, request, settings): + file = self.user_manager.get_request_user_filepath( + request, "comfy.settings.json") + with open(file, "w") as f: + f.write(json.dumps(settings, indent=4)) + + def 
add_routes(self, routes): + @routes.get("/settings") + async def get_settings(request): + return web.json_response(self.get_settings(request)) + + @routes.get("/settings/{id}") + async def get_setting(request): + value = None + settings = self.get_settings(request) + setting_id = request.match_info.get("id", None) + if setting_id and setting_id in settings: + value = settings[setting_id] + return web.json_response(value) + + @routes.post("/settings") + async def post_settings(request): + settings = self.get_settings(request) + new_settings = await request.json() + self.save_settings(request, {**settings, **new_settings}) + return web.Response(status=200) + + @routes.post("/settings/{id}") + async def post_setting(request): + setting_id = request.match_info.get("id", None) + if not setting_id: + return web.Response(status=400) + settings = self.get_settings(request) + settings[setting_id] = await request.json() + self.save_settings(request, settings) + return web.Response(status=200) \ No newline at end of file diff --git a/app/user_manager.py b/app/user_manager.py new file mode 100644 index 00000000000..5a1031744e7 --- /dev/null +++ b/app/user_manager.py @@ -0,0 +1,141 @@ +import json +import os +import re +import uuid +from aiohttp import web +from comfy.cli_args import args +from folder_paths import user_directory +from .app_settings import AppSettings + +default_user = "default" +users_file = os.path.join(user_directory, "users.json") + + +class UserManager(): + def __init__(self): + global user_directory + + self.settings = AppSettings(self) + if not os.path.exists(user_directory): + os.mkdir(user_directory) + if not args.multi_user: + print("****** User settings have been changed to be stored on the server instead of browser storage. ******") + print("****** For multi-user setups add the --multi-user CLI argument to enable multiple user profiles. 
******") + + if args.multi_user: + if os.path.isfile(users_file): + with open(users_file) as f: + self.users = json.load(f) + else: + self.users = {} + else: + self.users = {"default": "default"} + + def get_request_user_id(self, request): + user = "default" + if args.multi_user and "comfy-user" in request.headers: + user = request.headers["comfy-user"] + + if user not in self.users: + raise KeyError("Unknown user: " + user) + + return user + + def get_request_user_filepath(self, request, file, type="userdata", create_dir=True): + global user_directory + + if type == "userdata": + root_dir = user_directory + else: + raise KeyError("Unknown filepath type:" + type) + + user = self.get_request_user_id(request) + path = user_root = os.path.abspath(os.path.join(root_dir, user)) + + # prevent leaving /{type} + if os.path.commonpath((root_dir, user_root)) != root_dir: + return None + + parent = user_root + + if file is not None: + # prevent leaving /{type}/{user} + path = os.path.abspath(os.path.join(user_root, file)) + if os.path.commonpath((user_root, path)) != user_root: + return None + parent = os.path.join(path, os.pardir) + + if create_dir and not os.path.exists(parent): + os.mkdir(parent) + + return path + + def add_user(self, name): + name = name.strip() + if not name: + raise ValueError("username not provided") + user_id = re.sub("[^a-zA-Z0-9-_]+", '-', name) + user_id = user_id + "_" + str(uuid.uuid4()) + + self.users[user_id] = name + + global users_file + with open(users_file, "w") as f: + json.dump(self.users, f) + + return user_id + + def add_routes(self, routes): + self.settings.add_routes(routes) + + @routes.get("/users") + async def get_users(request): + if args.multi_user: + return web.json_response({"storage": "server", "users": self.users}) + else: + user_dir = self.get_request_user_filepath(request, None, create_dir=False) + return web.json_response({ + "storage": "server" if args.server_storage else "browser", + "migrated": os.path.exists(user_dir) + }) + + @routes.post("/users") + async def post_users(request): + body = await request.json() + username = body["username"] + if username in self.users.values(): + return web.json_response({"error": "Duplicate username."}, status=400) + + user_id = self.add_user(username) + return web.json_response(user_id) + + @routes.get("/userdata/{file}") + async def getuserdata(request): + file = request.match_info.get("file", None) + if not file: + return web.Response(status=400) + + path = self.get_request_user_filepath(request, file) + if not path: + return web.Response(status=403) + + if not os.path.exists(path): + return web.Response(status=404) + + return web.FileResponse(path) + + @routes.post("/userdata/{file}") + async def post_userdata(request): + file = request.match_info.get("file", None) + if not file: + return web.Response(status=400) + + path = self.get_request_user_filepath(request, file) + if not path: + return web.Response(status=403) + + body = await request.read() + with open(path, "wb") as f: + f.write(body) + + return web.Response(status=200) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 50d7b62fa95..c02bbf2ba7b 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -112,6 +112,9 @@ class LatentPreviewMethod(enum.Enum): parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.") +parser.add_argument("--server-storage", action="store_true", help="Saves settings and other user configuration on the server instead of in browser storage.") 
+parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage. If enabled, server-storage will be unconditionally enabled.") + if comfy.options.args_parsing: args = parser.parse_args() else: @@ -122,3 +125,6 @@ class LatentPreviewMethod(enum.Enum): if args.disable_auto_launch: args.auto_launch = False + +if args.multi_user: + args.server_storage = True \ No newline at end of file diff --git a/folder_paths.py b/folder_paths.py index a8726d8dd04..641e5f0b26a 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -34,6 +34,7 @@ output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp") input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input") +user_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "user") filename_list_cache = {} diff --git a/server.py b/server.py index 5b821dcbf9f..1135fff88bd 100644 --- a/server.py +++ b/server.py @@ -30,6 +30,7 @@ import comfy.utils import comfy.model_management +from app.user_manager import UserManager class BinaryEventTypes: PREVIEW_IMAGE = 1 @@ -72,6 +73,7 @@ def __init__(self, loop): mimetypes.init() mimetypes.types_map['.js'] = 'application/javascript; charset=utf-8' + self.user_manager = UserManager() self.supports = ["custom_nodes_from_web"] self.prompt_queue = None self.loop = loop @@ -532,6 +534,7 @@ async def post_history(request): return web.Response(status=200) def add_routes(self): + self.user_manager.add_routes(self.routes) self.app.add_routes(self.routes) for name, dir in nodes.EXTENSION_WEB_DIRS.items(): diff --git a/tests-ui/babel.config.json b/tests-ui/babel.config.json index 526ddfd8df1..f27d6c397e5 100644 --- a/tests-ui/babel.config.json +++ b/tests-ui/babel.config.json @@ -1,3 +1,4 @@ { - "presets": ["@babel/preset-env"] + "presets": ["@babel/preset-env"], + "plugins": ["babel-plugin-transform-import-meta"] } diff --git a/tests-ui/package-lock.json b/tests-ui/package-lock.json index 35911cd7ffd..0f409ca2484 100644 --- a/tests-ui/package-lock.json +++ b/tests-ui/package-lock.json @@ -11,6 +11,7 @@ "devDependencies": { "@babel/preset-env": "^7.22.20", "@types/jest": "^29.5.5", + "babel-plugin-transform-import-meta": "^2.2.1", "jest": "^29.7.0", "jest-environment-jsdom": "^29.7.0" } @@ -2591,6 +2592,19 @@ "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, + "node_modules/babel-plugin-transform-import-meta": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/babel-plugin-transform-import-meta/-/babel-plugin-transform-import-meta-2.2.1.tgz", + "integrity": "sha512-AxNh27Pcg8Kt112RGa3Vod2QS2YXKKJ6+nSvRtv7qQTJAdx0MZa4UHZ4lnxHUWA2MNbLuZQv5FVab4P1CoLOWw==", + "dev": true, + "dependencies": { + "@babel/template": "^7.4.4", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "@babel/core": "^7.10.0" + } + }, "node_modules/babel-preset-current-node-syntax": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz", @@ -5233,6 +5247,12 @@ "node": ">=12" } }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==", + "dev": true + }, "node_modules/type-detect": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", diff --git a/tests-ui/package.json 
b/tests-ui/package.json index e7b60ad8e75..ae7e490843a 100644 --- a/tests-ui/package.json +++ b/tests-ui/package.json @@ -24,6 +24,7 @@ "devDependencies": { "@babel/preset-env": "^7.22.20", "@types/jest": "^29.5.5", + "babel-plugin-transform-import-meta": "^2.2.1", "jest": "^29.7.0", "jest-environment-jsdom": "^29.7.0" } diff --git a/tests-ui/tests/users.test.js b/tests-ui/tests/users.test.js new file mode 100644 index 00000000000..5e07307306e --- /dev/null +++ b/tests-ui/tests/users.test.js @@ -0,0 +1,295 @@ +// @ts-check +/// +const { start } = require("../utils"); +const lg = require("../utils/litegraph"); + +describe("users", () => { + beforeEach(() => { + lg.setup(global); + }); + + afterEach(() => { + lg.teardown(global); + }); + + function expectNoUserScreen() { + // Ensure login isnt visible + const selection = document.querySelectorAll("#comfy-user-selection")?.[0]; + expect(selection["style"].display).toBe("none"); + const menu = document.querySelectorAll(".comfy-menu")?.[0]; + expect(window.getComputedStyle(menu)?.display).not.toBe("none"); + } + + describe("multi-user", () => { + function mockAddStylesheet() { + const utils = require("../../web/scripts/utils"); + utils.addStylesheet = jest.fn().mockReturnValue(Promise.resolve()); + } + + async function waitForUserScreenShow() { + mockAddStylesheet(); + + // Wait for "show" to be called + const { UserSelectionScreen } = require("../../web/scripts/ui/userSelection"); + let resolve, reject; + const fn = UserSelectionScreen.prototype.show; + const p = new Promise((res, rej) => { + resolve = res; + reject = rej; + }); + jest.spyOn(UserSelectionScreen.prototype, "show").mockImplementation(async (...args) => { + const res = fn(...args); + await new Promise(process.nextTick); // wait for promises to resolve + resolve(); + return res; + }); + // @ts-ignore + setTimeout(() => reject("timeout waiting for UserSelectionScreen to be shown."), 500); + await p; + await new Promise(process.nextTick); // wait for promises to resolve + } + + async function testUserScreen(onShown, users) { + if (!users) { + users = {}; + } + const starting = start({ + resetEnv: true, + userConfig: { storage: "server", users }, + }); + + // Ensure no current user + expect(localStorage["Comfy.userId"]).toBeFalsy(); + expect(localStorage["Comfy.userName"]).toBeFalsy(); + + await waitForUserScreenShow(); + + const selection = document.querySelectorAll("#comfy-user-selection")?.[0]; + expect(selection).toBeTruthy(); + + // Ensure login is visible + expect(window.getComputedStyle(selection)?.display).not.toBe("none"); + // Ensure menu is hidden + const menu = document.querySelectorAll(".comfy-menu")?.[0]; + expect(window.getComputedStyle(menu)?.display).toBe("none"); + + const isCreate = await onShown(selection); + + // Submit form + selection.querySelectorAll("form")[0].submit(); + await new Promise(process.nextTick); // wait for promises to resolve + + // Wait for start + const s = await starting; + + // Ensure login is removed + expect(document.querySelectorAll("#comfy-user-selection")).toHaveLength(0); + expect(window.getComputedStyle(menu)?.display).not.toBe("none"); + + // Ensure settings + templates are saved + const { api } = require("../../web/scripts/api"); + expect(api.createUser).toHaveBeenCalledTimes(+isCreate); + expect(api.storeSettings).toHaveBeenCalledTimes(+isCreate); + expect(api.storeUserData).toHaveBeenCalledTimes(+isCreate); + if (isCreate) { + expect(api.storeUserData).toHaveBeenCalledWith("comfy.templates.json", null, { stringify: false }); + 
expect(s.app.isNewUserSession).toBeTruthy(); + } else { + expect(s.app.isNewUserSession).toBeFalsy(); + } + + return { users, selection, ...s }; + } + + it("allows user creation if no users", async () => { + const { users } = await testUserScreen((selection) => { + // Ensure we have no users flag added + expect(selection.classList.contains("no-users")).toBeTruthy(); + + // Enter a username + const input = selection.getElementsByTagName("input")[0]; + input.focus(); + input.value = "Test User"; + + return true; + }); + + expect(users).toStrictEqual({ + "Test User!": "Test User", + }); + + expect(localStorage["Comfy.userId"]).toBe("Test User!"); + expect(localStorage["Comfy.userName"]).toBe("Test User"); + }); + it("allows user creation if no current user but other users", async () => { + const users = { + "Test User 2!": "Test User 2", + }; + + await testUserScreen((selection) => { + expect(selection.classList.contains("no-users")).toBeFalsy(); + + // Enter a username + const input = selection.getElementsByTagName("input")[0]; + input.focus(); + input.value = "Test User 3"; + return true; + }, users); + + expect(users).toStrictEqual({ + "Test User 2!": "Test User 2", + "Test User 3!": "Test User 3", + }); + + expect(localStorage["Comfy.userId"]).toBe("Test User 3!"); + expect(localStorage["Comfy.userName"]).toBe("Test User 3"); + }); + it("allows user selection if no current user but other users", async () => { + const users = { + "A!": "A", + "B!": "B", + "C!": "C", + }; + + await testUserScreen((selection) => { + expect(selection.classList.contains("no-users")).toBeFalsy(); + + // Check user list + const select = selection.getElementsByTagName("select")[0]; + const options = select.getElementsByTagName("option"); + expect( + [...options] + .filter((o) => !o.disabled) + .reduce((p, n) => { + p[n.getAttribute("value")] = n.textContent; + return p; + }, {}) + ).toStrictEqual(users); + + // Select an option + select.focus(); + select.value = options[2].value; + + return false; + }, users); + + expect(users).toStrictEqual(users); + + expect(localStorage["Comfy.userId"]).toBe("B!"); + expect(localStorage["Comfy.userName"]).toBe("B"); + }); + it("doesnt show user screen if current user", async () => { + const starting = start({ + resetEnv: true, + userConfig: { + storage: "server", + users: { + "User!": "User", + }, + }, + localStorage: { + "Comfy.userId": "User!", + "Comfy.userName": "User", + }, + }); + await new Promise(process.nextTick); // wait for promises to resolve + + expectNoUserScreen(); + + await starting; + }); + it("allows user switching", async () => { + const { app } = await start({ + resetEnv: true, + userConfig: { + storage: "server", + users: { + "User!": "User", + }, + }, + localStorage: { + "Comfy.userId": "User!", + "Comfy.userName": "User", + }, + }); + + // cant actually test switching user easily but can check the setting is present + expect(app.ui.settings.settingsLookup["Comfy.SwitchUser"]).toBeTruthy(); + }); + }); + describe("single-user", () => { + it("doesnt show user creation if no default user", async () => { + const { app } = await start({ + resetEnv: true, + userConfig: { migrated: false, storage: "server" }, + }); + expectNoUserScreen(); + + // It should store the settings + const { api } = require("../../web/scripts/api"); + expect(api.storeSettings).toHaveBeenCalledTimes(1); + expect(api.storeUserData).toHaveBeenCalledTimes(1); + expect(api.storeUserData).toHaveBeenCalledWith("comfy.templates.json", null, { stringify: false }); + 
expect(app.isNewUserSession).toBeTruthy(); + }); + it("doesnt show user creation if default user", async () => { + const { app } = await start({ + resetEnv: true, + userConfig: { migrated: true, storage: "server" }, + }); + expectNoUserScreen(); + + // It should store the settings + const { api } = require("../../web/scripts/api"); + expect(api.storeSettings).toHaveBeenCalledTimes(0); + expect(api.storeUserData).toHaveBeenCalledTimes(0); + expect(app.isNewUserSession).toBeFalsy(); + }); + it("doesnt allow user switching", async () => { + const { app } = await start({ + resetEnv: true, + userConfig: { migrated: true, storage: "server" }, + }); + expectNoUserScreen(); + + expect(app.ui.settings.settingsLookup["Comfy.SwitchUser"]).toBeFalsy(); + }); + }); + describe("browser-user", () => { + it("doesnt show user creation if no default user", async () => { + const { app } = await start({ + resetEnv: true, + userConfig: { migrated: false, storage: "browser" }, + }); + expectNoUserScreen(); + + // It should store the settings + const { api } = require("../../web/scripts/api"); + expect(api.storeSettings).toHaveBeenCalledTimes(0); + expect(api.storeUserData).toHaveBeenCalledTimes(0); + expect(app.isNewUserSession).toBeFalsy(); + }); + it("doesnt show user creation if default user", async () => { + const { app } = await start({ + resetEnv: true, + userConfig: { migrated: true, storage: "server" }, + }); + expectNoUserScreen(); + + // It should store the settings + const { api } = require("../../web/scripts/api"); + expect(api.storeSettings).toHaveBeenCalledTimes(0); + expect(api.storeUserData).toHaveBeenCalledTimes(0); + expect(app.isNewUserSession).toBeFalsy(); + }); + it("doesnt allow user switching", async () => { + const { app } = await start({ + resetEnv: true, + userConfig: { migrated: true, storage: "browser" }, + }); + expectNoUserScreen(); + + expect(app.ui.settings.settingsLookup["Comfy.SwitchUser"]).toBeFalsy(); + }); + }); +}); diff --git a/tests-ui/utils/index.js b/tests-ui/utils/index.js index 6a08e8594e9..74b6cf93dbc 100644 --- a/tests-ui/utils/index.js +++ b/tests-ui/utils/index.js @@ -1,10 +1,18 @@ const { mockApi } = require("./setup"); const { Ez } = require("./ezgraph"); const lg = require("./litegraph"); +const fs = require("fs"); +const path = require("path"); + +const html = fs.readFileSync(path.resolve(__dirname, "../../web/index.html")) /** * - * @param { Parameters[0] & { resetEnv?: boolean, preSetup?(app): Promise } } config + * @param { Parameters[0] & { + * resetEnv?: boolean, + * preSetup?(app): Promise, + * localStorage?: Record + * } } config * @returns */ export async function start(config = {}) { @@ -12,12 +20,18 @@ export async function start(config = {}) { jest.resetModules(); jest.resetAllMocks(); lg.setup(global); + localStorage.clear(); + sessionStorage.clear(); } + Object.assign(localStorage, config.localStorage ?? 
{}); + document.body.innerHTML = html; + mockApi(config); const { app } = require("../../web/scripts/app"); config.preSetup?.(app); await app.setup(); + return { ...Ez.graph(app, global["LiteGraph"], global["LGraphCanvas"]), app }; } diff --git a/tests-ui/utils/setup.js b/tests-ui/utils/setup.js index dd150214a34..e46258943ed 100644 --- a/tests-ui/utils/setup.js +++ b/tests-ui/utils/setup.js @@ -18,9 +18,21 @@ function* walkSync(dir) { */ /** - * @param { { mockExtensions?: string[], mockNodeDefs?: Record } } config + * @param {{ + * mockExtensions?: string[], + * mockNodeDefs?: Record, +* settings?: Record +* userConfig?: {storage: "server" | "browser", users?: Record, migrated?: boolean }, +* userData?: Record + * }} config */ -export function mockApi({ mockExtensions, mockNodeDefs } = {}) { +export function mockApi(config = {}) { + let { mockExtensions, mockNodeDefs, userConfig, settings, userData } = { + userConfig, + settings: {}, + userData: {}, + ...config, + }; if (!mockExtensions) { mockExtensions = Array.from(walkSync(path.resolve("../web/extensions/core"))) .filter((x) => x.endsWith(".js")) @@ -40,6 +52,26 @@ export function mockApi({ mockExtensions, mockNodeDefs } = {}) { getNodeDefs: jest.fn(() => mockNodeDefs), init: jest.fn(), apiURL: jest.fn((x) => "../../web/" + x), + createUser: jest.fn((username) => { + if(username in userConfig.users) { + return { status: 400, json: () => "Duplicate" } + } + userConfig.users[username + "!"] = username; + return { status: 200, json: () => username + "!" } + }), + getUserConfig: jest.fn(() => userConfig ?? { storage: "browser", migrated: false }), + getSettings: jest.fn(() => settings), + storeSettings: jest.fn((v) => Object.assign(settings, v)), + getUserData: jest.fn((f) => { + if (f in userData) { + return { status: 200, json: () => userData[f] }; + } else { + return { status: 404 }; + } + }), + storeUserData: jest.fn((file, data) => { + userData[file] = data; + }), }; jest.mock("../../web/scripts/api", () => ({ get api() { diff --git a/web/extensions/core/nodeTemplates.js b/web/extensions/core/nodeTemplates.js index bc9a108644a..9350ba6549c 100644 --- a/web/extensions/core/nodeTemplates.js +++ b/web/extensions/core/nodeTemplates.js @@ -1,4 +1,5 @@ import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js"; import { ComfyDialog, $el } from "../../scripts/ui.js"; import { GroupNodeConfig, GroupNodeHandler } from "./groupNode.js"; @@ -20,16 +21,20 @@ import { GroupNodeConfig, GroupNodeHandler } from "./groupNode.js"; // Open the manage dialog and Drag and drop elements using the "Name:" label as handle const id = "Comfy.NodeTemplates"; +const file = "comfy.templates.json"; class ManageTemplates extends ComfyDialog { constructor() { super(); + this.load().then((v) => { + this.templates = v; + }); + this.element.classList.add("comfy-manage-templates"); - this.templates = this.load(); this.draggedEl = null; this.saveVisualCue = null; this.emptyImg = new Image(); - this.emptyImg.src = 'data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs='; + this.emptyImg.src = "data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs="; this.importInput = $el("input", { type: "file", @@ -67,17 +72,50 @@ class ManageTemplates extends ComfyDialog { return btns; } - load() { - const templates = localStorage.getItem(id); - if (templates) { - return JSON.parse(templates); + async load() { + let templates = []; + if (app.storageLocation === "server") { + if (app.isNewUserSession) { + // New user so 
migrate existing templates + const json = localStorage.getItem(id); + if (json) { + templates = JSON.parse(json); + } + await api.storeUserData(file, json, { stringify: false }); + } else { + const res = await api.getUserData(file); + if (res.status === 200) { + try { + templates = await res.json(); + } catch (error) { + } + } else if (res.status !== 404) { + console.error(res.status + " " + res.statusText); + } + } } else { - return []; + const json = localStorage.getItem(id); + if (json) { + templates = JSON.parse(json); + } } + + return templates ?? []; } - store() { - localStorage.setItem(id, JSON.stringify(this.templates)); + async store() { + if(app.storageLocation === "server") { + const templates = JSON.stringify(this.templates, undefined, 4); + localStorage.setItem(id, templates); // Backwards compatibility + try { + await api.storeUserData(file, templates, { stringify: false }); + } catch (error) { + console.error(error); + alert(error.message); + } + } else { + localStorage.setItem(id, JSON.stringify(this.templates)); + } } async importAll() { @@ -85,14 +123,14 @@ class ManageTemplates extends ComfyDialog { if (file.type === "application/json" || file.name.endsWith(".json")) { const reader = new FileReader(); reader.onload = async () => { - var importFile = JSON.parse(reader.result); - if (importFile && importFile?.templates) { + const importFile = JSON.parse(reader.result); + if (importFile?.templates) { for (const template of importFile.templates) { if (template?.name && template?.data) { this.templates.push(template); } } - this.store(); + await this.store(); } }; await reader.readAsText(file); @@ -159,7 +197,7 @@ class ManageTemplates extends ComfyDialog { e.currentTarget.style.border = "1px dashed transparent"; e.currentTarget.removeAttribute("draggable"); - // rearrange the elements in the localStorage + // rearrange the elements this.element.querySelectorAll('.tempateManagerRow').forEach((el,i) => { var prev_i = el.dataset.id; diff --git a/web/index.html b/web/index.html index 41bc246c090..094db9d1529 100644 --- a/web/index.html +++ b/web/index.html @@ -16,5 +16,33 @@ window.graph = app.graph; - + + + diff --git a/web/scripts/api.js b/web/scripts/api.js index 9aa7528af04..3a9bcc87a4e 100644 --- a/web/scripts/api.js +++ b/web/scripts/api.js @@ -12,6 +12,13 @@ class ComfyApi extends EventTarget { } fetchApi(route, options) { + if (!options) { + options = {}; + } + if (!options.headers) { + options.headers = {}; + } + options.headers["Comfy-User"] = this.user; return fetch(this.apiURL(route), options); } @@ -315,6 +322,99 @@ class ComfyApi extends EventTarget { async interrupt() { await this.#postItem("interrupt", null); } + + /** + * Gets user configuration data and where data should be stored + * @returns { Promise<{ storage: "server" | "browser", users?: Promise, migrated?: boolean }> } + */ + async getUserConfig() { + return (await this.fetchApi("/users")).json(); + } + + /** + * Creates a new user + * @param { string } username + * @returns The fetch response + */ + createUser(username) { + return this.fetchApi("/users", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ username }), + }); + } + + /** + * Gets all setting values for the current user + * @returns { Promise } A dictionary of id -> value + */ + async getSettings() { + return (await this.fetchApi("/settings")).json(); + } + + /** + * Gets a setting for the current user + * @param { string } id The id of the setting to fetch + * @returns { Promise } The 
setting value + */ + async getSetting(id) { + return (await this.fetchApi(`/settings/${encodeURIComponent(id)}`)).json(); + } + + /** + * Stores a dictionary of settings for the current user + * @param { Record } settings Dictionary of setting id -> value to save + * @returns { Promise } + */ + async storeSettings(settings) { + return this.fetchApi(`/settings`, { + method: "POST", + body: JSON.stringify(settings) + }); + } + + /** + * Stores a setting for the current user + * @param { string } id The id of the setting to update + * @param { unknown } value The value of the setting + * @returns { Promise } + */ + async storeSetting(id, value) { + return this.fetchApi(`/settings/${encodeURIComponent(id)}`, { + method: "POST", + body: JSON.stringify(value) + }); + } + + /** + * Gets a user data file for the current user + * @param { string } file The name of the userdata file to load + * @param { RequestInit } [options] + * @returns { Promise } The fetch response object + */ + async getUserData(file, options) { + return this.fetchApi(`/userdata/${encodeURIComponent(file)}`, options); + } + + /** + * Stores a user data file for the current user + * @param { string } file The name of the userdata file to save + * @param { unknown } data The data to save to the file + * @param { RequestInit & { stringify?: boolean, throwOnError?: boolean } } [options] + * @returns { Promise } + */ + async storeUserData(file, data, options = { stringify: true, throwOnError: true }) { + const resp = await this.fetchApi(`/userdata/${encodeURIComponent(file)}`, { + method: "POST", + body: options?.stringify ? JSON.stringify(data) : data, + ...options, + }); + if (resp.status !== 200) { + throw new Error(`Error storing user data file '${file}': ${resp.status} ${(await resp).statusText}`); + } + } } export const api = new ComfyApi(); diff --git a/web/scripts/app.js b/web/scripts/app.js index 7353f5a3bdd..72f9e86038f 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1291,10 +1291,92 @@ export class ComfyApp { await Promise.all(extensionPromises); } + async #migrateSettings() { + this.isNewUserSession = true; + // Store all current settings + const settings = Object.keys(this.ui.settings).reduce((p, n) => { + const v = localStorage[`Comfy.Settings.${n}`]; + if (v) { + try { + p[n] = JSON.parse(v); + } catch (error) {} + } + return p; + }, {}); + + await api.storeSettings(settings); + } + + async #setUser() { + const userConfig = await api.getUserConfig(); + this.storageLocation = userConfig.storage; + if (typeof userConfig.migrated == "boolean") { + // Single user mode migrated true/false for if the default user is created + if (!userConfig.migrated && this.storageLocation === "server") { + // Default user not created yet + await this.#migrateSettings(); + } + return; + } + + this.multiUserServer = true; + let user = localStorage["Comfy.userId"]; + const users = userConfig.users ?? 
{}; + if (!user || !users[user]) { + // This will rarely be hit so move the loading to on demand + const { UserSelectionScreen } = await import("./ui/userSelection.js"); + + this.ui.menuContainer.style.display = "none"; + const { userId, username, created } = await new UserSelectionScreen().show(users, user); + this.ui.menuContainer.style.display = ""; + + user = userId; + localStorage["Comfy.userName"] = username; + localStorage["Comfy.userId"] = user; + + if (created) { + api.user = user; + await this.#migrateSettings(); + } + } + + api.user = user; + + this.ui.settings.addSetting({ + id: "Comfy.SwitchUser", + name: "Switch User", + type: (name) => { + let currentUser = localStorage["Comfy.userName"]; + if (currentUser) { + currentUser = ` (${currentUser})`; + } + return $el("tr", [ + $el("td", [ + $el("label", { + textContent: name, + }), + ]), + $el("td", [ + $el("button", { + textContent: name + (currentUser ?? ""), + onclick: () => { + delete localStorage["Comfy.userId"]; + delete localStorage["Comfy.userName"]; + window.location.reload(); + }, + }), + ]), + ]); + }, + }); + } + /** * Set up the app on the page */ async setup() { + await this.#setUser(); + await this.ui.settings.load(); await this.#loadExtensions(); // Create and mount the LiteGraph in the DOM diff --git a/web/scripts/ui.js b/web/scripts/ui.js index ebaf86fe428..6887f70288a 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -1,4 +1,8 @@ -import {api} from "./api.js"; +import { api } from "./api.js"; +import { ComfyDialog as _ComfyDialog } from "./ui/dialog.js"; +import { ComfySettingsDialog } from "./ui/settings.js"; + +export const ComfyDialog = _ComfyDialog; export function $el(tag, propsOrChildren, children) { const split = tag.split("."); @@ -167,267 +171,6 @@ function dragElement(dragEl, settings) { } } -export class ComfyDialog { - constructor() { - this.element = $el("div.comfy-modal", {parent: document.body}, [ - $el("div.comfy-modal-content", [$el("p", {$: (p) => (this.textElement = p)}), ...this.createButtons()]), - ]); - } - - createButtons() { - return [ - $el("button", { - type: "button", - textContent: "Close", - onclick: () => this.close(), - }), - ]; - } - - close() { - this.element.style.display = "none"; - } - - show(html) { - if (typeof html === "string") { - this.textElement.innerHTML = html; - } else { - this.textElement.replaceChildren(html); - } - this.element.style.display = "flex"; - } -} - -class ComfySettingsDialog extends ComfyDialog { - constructor() { - super(); - this.element = $el("dialog", { - id: "comfy-settings-dialog", - parent: document.body, - }, [ - $el("table.comfy-modal-content.comfy-table", [ - $el("caption", {textContent: "Settings"}), - $el("tbody", {$: (tbody) => (this.textElement = tbody)}), - $el("button", { - type: "button", - textContent: "Close", - style: { - cursor: "pointer", - }, - onclick: () => { - this.element.close(); - }, - }), - ]), - ]); - this.settings = []; - } - - getSettingValue(id, defaultValue) { - const settingId = "Comfy.Settings." + id; - const v = localStorage[settingId]; - return v == null ? defaultValue : JSON.parse(v); - } - - setSettingValue(id, value) { - const settingId = "Comfy.Settings." 
+ id; - localStorage[settingId] = JSON.stringify(value); - } - - addSetting({id, name, type, defaultValue, onChange, attrs = {}, tooltip = "", options = undefined}) { - if (!id) { - throw new Error("Settings must have an ID"); - } - - if (this.settings.find((s) => s.id === id)) { - throw new Error(`Setting ${id} of type ${type} must have a unique ID.`); - } - - const settingId = `Comfy.Settings.${id}`; - const v = localStorage[settingId]; - let value = v == null ? defaultValue : JSON.parse(v); - - // Trigger initial setting of value - if (onChange) { - onChange(value, undefined); - } - - this.settings.push({ - render: () => { - const setter = (v) => { - if (onChange) { - onChange(v, value); - } - localStorage[settingId] = JSON.stringify(v); - value = v; - }; - value = this.getSettingValue(id, defaultValue); - - let element; - const htmlID = id.replaceAll(".", "-"); - - const labelCell = $el("td", [ - $el("label", { - for: htmlID, - classList: [tooltip !== "" ? "comfy-tooltip-indicator" : ""], - textContent: name, - }) - ]); - - if (typeof type === "function") { - element = type(name, setter, value, attrs); - } else { - switch (type) { - case "boolean": - element = $el("tr", [ - labelCell, - $el("td", [ - $el("input", { - id: htmlID, - type: "checkbox", - checked: value, - onchange: (event) => { - const isChecked = event.target.checked; - if (onChange !== undefined) { - onChange(isChecked) - } - this.setSettingValue(id, isChecked); - }, - }), - ]), - ]) - break; - case "number": - element = $el("tr", [ - labelCell, - $el("td", [ - $el("input", { - type, - value, - id: htmlID, - oninput: (e) => { - setter(e.target.value); - }, - ...attrs - }), - ]), - ]); - break; - case "slider": - element = $el("tr", [ - labelCell, - $el("td", [ - $el("div", { - style: { - display: "grid", - gridAutoFlow: "column", - }, - }, [ - $el("input", { - ...attrs, - value, - type: "range", - oninput: (e) => { - setter(e.target.value); - e.target.nextElementSibling.value = e.target.value; - }, - }), - $el("input", { - ...attrs, - value, - id: htmlID, - type: "number", - style: {maxWidth: "4rem"}, - oninput: (e) => { - setter(e.target.value); - e.target.previousElementSibling.value = e.target.value; - }, - }), - ]), - ]), - ]); - break; - case "combo": - element = $el("tr", [ - labelCell, - $el("td", [ - $el( - "select", - { - oninput: (e) => { - setter(e.target.value); - }, - }, - (typeof options === "function" ? options(value) : options || []).map((opt) => { - if (typeof opt === "string") { - opt = { text: opt }; - } - const v = opt.value ?? 
opt.text; - return $el("option", { - value: v, - textContent: opt.text, - selected: value + "" === v + "", - }); - }) - ), - ]), - ]); - break; - case "text": - default: - if (type !== "text") { - console.warn(`Unsupported setting type '${type}, defaulting to text`); - } - - element = $el("tr", [ - labelCell, - $el("td", [ - $el("input", { - value, - id: htmlID, - oninput: (e) => { - setter(e.target.value); - }, - ...attrs, - }), - ]), - ]); - break; - } - } - if (tooltip) { - element.title = tooltip; - } - - return element; - }, - }); - - const self = this; - return { - get value() { - return self.getSettingValue(id, defaultValue); - }, - set value(v) { - self.setSettingValue(id, v); - }, - }; - } - - show() { - this.textElement.replaceChildren( - $el("tr", { - style: {display: "none"}, - }, [ - $el("th"), - $el("th", {style: {width: "33%"}}) - ]), - ...this.settings.map((s) => s.render()), - ) - this.element.showModal(); - } -} - class ComfyList { #type; #text; @@ -526,7 +269,7 @@ export class ComfyUI { constructor(app) { this.app = app; this.dialog = new ComfyDialog(); - this.settings = new ComfySettingsDialog(); + this.settings = new ComfySettingsDialog(app); this.batchCount = 1; this.lastQueueSize = 0; diff --git a/web/scripts/ui/dialog.js b/web/scripts/ui/dialog.js new file mode 100644 index 00000000000..aee93b3c84f --- /dev/null +++ b/web/scripts/ui/dialog.js @@ -0,0 +1,32 @@ +import { $el } from "../ui.js"; + +export class ComfyDialog { + constructor() { + this.element = $el("div.comfy-modal", { parent: document.body }, [ + $el("div.comfy-modal-content", [$el("p", { $: (p) => (this.textElement = p) }), ...this.createButtons()]), + ]); + } + + createButtons() { + return [ + $el("button", { + type: "button", + textContent: "Close", + onclick: () => this.close(), + }), + ]; + } + + close() { + this.element.style.display = "none"; + } + + show(html) { + if (typeof html === "string") { + this.textElement.innerHTML = html; + } else { + this.textElement.replaceChildren(html); + } + this.element.style.display = "flex"; + } +} diff --git a/web/scripts/ui/settings.js b/web/scripts/ui/settings.js new file mode 100644 index 00000000000..1cdba5cfe40 --- /dev/null +++ b/web/scripts/ui/settings.js @@ -0,0 +1,307 @@ +import { $el } from "../ui.js"; +import { api } from "../api.js"; +import { ComfyDialog } from "./dialog.js"; + +export class ComfySettingsDialog extends ComfyDialog { + constructor(app) { + super(); + this.app = app; + this.settingsValues = {}; + this.settingsLookup = {}; + this.element = $el( + "dialog", + { + id: "comfy-settings-dialog", + parent: document.body, + }, + [ + $el("table.comfy-modal-content.comfy-table", [ + $el("caption", { textContent: "Settings" }), + $el("tbody", { $: (tbody) => (this.textElement = tbody) }), + $el("button", { + type: "button", + textContent: "Close", + style: { + cursor: "pointer", + }, + onclick: () => { + this.element.close(); + }, + }), + ]), + ] + ); + } + + get settings() { + return Object.values(this.settingsLookup); + } + + async load() { + if (this.app.storageLocation === "browser") { + this.settingsValues = localStorage; + } else { + this.settingsValues = await api.getSettings(); + } + + // Trigger onChange for any settings added before load + for (const id in this.settingsLookup) { + this.settingsLookup[id].onChange?.(this.settingsValues[this.getId(id)]); + } + } + + getId(id) { + if (this.app.storageLocation === "browser") { + id = "Comfy.Settings." 
+ id; + } + return id; + } + + getSettingValue(id, defaultValue) { + let value = this.settingsValues[this.getId(id)]; + if(value != null) { + if(this.app.storageLocation === "browser") { + try { + value = JSON.parse(value); + } catch (error) { + } + } + } + return value ?? defaultValue; + } + + async setSettingValueAsync(id, value) { + const json = JSON.stringify(value); + localStorage["Comfy.Settings." + id] = json; // backwards compatibility for extensions keep setting in storage + + let oldValue = this.getSettingValue(id, undefined); + this.settingsValues[this.getId(id)] = value; + + if (id in this.settingsLookup) { + this.settingsLookup[id].onChange?.(value, oldValue); + } + + await api.storeSetting(id, value); + } + + setSettingValue(id, value) { + this.setSettingValueAsync(id, value).catch((err) => { + alert(`Error saving setting '${id}'`); + console.error(err); + }); + } + + addSetting({ id, name, type, defaultValue, onChange, attrs = {}, tooltip = "", options = undefined }) { + if (!id) { + throw new Error("Settings must have an ID"); + } + + if (id in this.settingsLookup) { + throw new Error(`Setting ${id} of type ${type} must have a unique ID.`); + } + + let skipOnChange = false; + let value = this.getSettingValue(id); + if (value == null) { + if (this.app.isNewUserSession) { + // Check if we have a localStorage value but not a setting value and we are a new user + const localValue = localStorage["Comfy.Settings." + id]; + if (localValue) { + value = JSON.parse(localValue); + this.setSettingValue(id, value); // Store on the server + } + } + if (value == null) { + value = defaultValue; + } + } + + // Trigger initial setting of value + if (!skipOnChange) { + onChange?.(value, undefined); + } + + this.settingsLookup[id] = { + id, + onChange, + name, + render: () => { + const setter = (v) => { + if (onChange) { + onChange(v, value); + } + + this.setSettingValue(id, v); + value = v; + }; + value = this.getSettingValue(id, defaultValue); + + let element; + const htmlID = id.replaceAll(".", "-"); + + const labelCell = $el("td", [ + $el("label", { + for: htmlID, + classList: [tooltip !== "" ? 
"comfy-tooltip-indicator" : ""], + textContent: name, + }), + ]); + + if (typeof type === "function") { + element = type(name, setter, value, attrs); + } else { + switch (type) { + case "boolean": + element = $el("tr", [ + labelCell, + $el("td", [ + $el("input", { + id: htmlID, + type: "checkbox", + checked: value, + onchange: (event) => { + const isChecked = event.target.checked; + if (onChange !== undefined) { + onChange(isChecked); + } + this.setSettingValue(id, isChecked); + }, + }), + ]), + ]); + break; + case "number": + element = $el("tr", [ + labelCell, + $el("td", [ + $el("input", { + type, + value, + id: htmlID, + oninput: (e) => { + setter(e.target.value); + }, + ...attrs, + }), + ]), + ]); + break; + case "slider": + element = $el("tr", [ + labelCell, + $el("td", [ + $el( + "div", + { + style: { + display: "grid", + gridAutoFlow: "column", + }, + }, + [ + $el("input", { + ...attrs, + value, + type: "range", + oninput: (e) => { + setter(e.target.value); + e.target.nextElementSibling.value = e.target.value; + }, + }), + $el("input", { + ...attrs, + value, + id: htmlID, + type: "number", + style: { maxWidth: "4rem" }, + oninput: (e) => { + setter(e.target.value); + e.target.previousElementSibling.value = e.target.value; + }, + }), + ] + ), + ]), + ]); + break; + case "combo": + element = $el("tr", [ + labelCell, + $el("td", [ + $el( + "select", + { + oninput: (e) => { + setter(e.target.value); + }, + }, + (typeof options === "function" ? options(value) : options || []).map((opt) => { + if (typeof opt === "string") { + opt = { text: opt }; + } + const v = opt.value ?? opt.text; + return $el("option", { + value: v, + textContent: opt.text, + selected: value + "" === v + "", + }); + }) + ), + ]), + ]); + break; + case "text": + default: + if (type !== "text") { + console.warn(`Unsupported setting type '${type}, defaulting to text`); + } + + element = $el("tr", [ + labelCell, + $el("td", [ + $el("input", { + value, + id: htmlID, + oninput: (e) => { + setter(e.target.value); + }, + ...attrs, + }), + ]), + ]); + break; + } + } + if (tooltip) { + element.title = tooltip; + } + + return element; + }, + }; + + const self = this; + return { + get value() { + return self.getSettingValue(id, defaultValue); + }, + set value(v) { + self.setSettingValue(id, v); + }, + }; + } + + show() { + this.textElement.replaceChildren( + $el( + "tr", + { + style: { display: "none" }, + }, + [$el("th"), $el("th", { style: { width: "33%" } })] + ), + ...this.settings.sort((a, b) => a.name.localeCompare(b.name)).map((s) => s.render()) + ); + this.element.showModal(); + } +} diff --git a/web/scripts/ui/spinner.css b/web/scripts/ui/spinner.css new file mode 100644 index 00000000000..56da6072ee3 --- /dev/null +++ b/web/scripts/ui/spinner.css @@ -0,0 +1,34 @@ +.lds-ring { + display: inline-block; + position: relative; + width: 1em; + height: 1em; +} +.lds-ring div { + box-sizing: border-box; + display: block; + position: absolute; + width: 100%; + height: 100%; + border: 0.15em solid #fff; + border-radius: 50%; + animation: lds-ring 1.2s cubic-bezier(0.5, 0, 0.5, 1) infinite; + border-color: #fff transparent transparent transparent; +} +.lds-ring div:nth-child(1) { + animation-delay: -0.45s; +} +.lds-ring div:nth-child(2) { + animation-delay: -0.3s; +} +.lds-ring div:nth-child(3) { + animation-delay: -0.15s; +} +@keyframes lds-ring { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(360deg); + } +} diff --git a/web/scripts/ui/spinner.js b/web/scripts/ui/spinner.js new file mode 100644 index 
00000000000..d049786f6a5 --- /dev/null +++ b/web/scripts/ui/spinner.js @@ -0,0 +1,9 @@ +import { addStylesheet } from "../utils.js"; + +addStylesheet(import.meta.url); + +export function createSpinner() { + const div = document.createElement("div"); + div.innerHTML = `<div class="lds-ring"><div></div><div></div><div></div><div></div></div>
`; + return div.firstElementChild; +} diff --git a/web/scripts/ui/userSelection.css b/web/scripts/ui/userSelection.css new file mode 100644 index 00000000000..35c9d66148d --- /dev/null +++ b/web/scripts/ui/userSelection.css @@ -0,0 +1,135 @@ +.comfy-user-selection { + width: 100vw; + height: 100vh; + position: absolute; + top: 0; + left: 0; + z-index: 999; + display: flex; + align-items: center; + justify-content: center; + font-family: sans-serif; + background: linear-gradient(var(--tr-even-bg-color), var(--tr-odd-bg-color)); +} + +.comfy-user-selection-inner { + background: var(--comfy-menu-bg); + margin-top: -30vh; + padding: 20px 40px; + border-radius: 10px; + min-width: 365px; + position: relative; + box-shadow: 0 0 20px rgba(0, 0, 0, 0.3); +} + +.comfy-user-selection-inner form { + width: 100%; + display: flex; + flex-direction: column; + align-items: center; +} + +.comfy-user-selection-inner h1 { + margin: 10px 0 30px 0; + font-weight: normal; +} + +.comfy-user-selection-inner label { + display: flex; + flex-direction: column; + width: 100%; +} + +.comfy-user-selection input, +.comfy-user-selection select { + background-color: var(--comfy-input-bg); + color: var(--input-text); + border: 0; + border-radius: 5px; + padding: 5px; + margin-top: 10px; +} + +.comfy-user-selection input::placeholder { + color: var(--descrip-text); + opacity: 1; +} + +.comfy-user-existing { + width: 100%; +} + +.no-users .comfy-user-existing { + display: none; +} + +.comfy-user-selection-inner .or-separator { + margin: 10px 0; + padding: 10px; + display: block; + text-align: center; + width: 100%; + color: var(--descrip-text); +} + +.comfy-user-selection-inner .or-separator { + overflow: hidden; + text-align: center; + margin-left: -10px; +} + +.comfy-user-selection-inner .or-separator::before, +.comfy-user-selection-inner .or-separator::after { + content: ""; + background-color: var(--border-color); + position: relative; + height: 1px; + vertical-align: middle; + display: inline-block; + width: calc(50% - 20px); + top: -1px; +} + +.comfy-user-selection-inner .or-separator::before { + right: 10px; + margin-left: -50%; +} + +.comfy-user-selection-inner .or-separator::after { + left: 10px; + margin-right: -50%; +} + +.comfy-user-selection-inner section { + width: 100%; + padding: 10px; + margin: -10px; + transition: background-color 0.2s; +} + +.comfy-user-selection-inner section.selected { + background: var(--border-color); + border-radius: 5px; +} + +.comfy-user-selection-inner footer { + display: flex; + flex-direction: column; + align-items: center; + margin-top: 20px; +} + +.comfy-user-selection-inner .comfy-user-error { + color: var(--error-text); + margin-bottom: 10px; +} + +.comfy-user-button-next { + font-size: 16px; + padding: 6px 10px; + width: 100px; + display: flex; + gap: 5px; + align-items: center; + justify-content: center; +} \ No newline at end of file diff --git a/web/scripts/ui/userSelection.js b/web/scripts/ui/userSelection.js new file mode 100644 index 00000000000..f9f1ca8071a --- /dev/null +++ b/web/scripts/ui/userSelection.js @@ -0,0 +1,114 @@ +import { api } from "../api.js"; +import { $el } from "../ui.js"; +import { addStylesheet } from "../utils.js"; +import { createSpinner } from "./spinner.js"; + +export class UserSelectionScreen { + async show(users, user) { + // This will rarely be hit so move the loading to on demand + await addStylesheet(import.meta.url); + const userSelection = document.getElementById("comfy-user-selection"); + userSelection.style.display = ""; + return new 
Promise((resolve) => { + const input = userSelection.getElementsByTagName("input")[0]; + const select = userSelection.getElementsByTagName("select")[0]; + const inputSection = input.closest("section"); + const selectSection = select.closest("section"); + const form = userSelection.getElementsByTagName("form")[0]; + const error = userSelection.getElementsByClassName("comfy-user-error")[0]; + const button = userSelection.getElementsByClassName("comfy-user-button-next")[0]; + + let inputActive = null; + input.addEventListener("focus", () => { + inputSection.classList.add("selected"); + selectSection.classList.remove("selected"); + inputActive = true; + }); + select.addEventListener("focus", () => { + inputSection.classList.remove("selected"); + selectSection.classList.add("selected"); + inputActive = false; + select.style.color = ""; + }); + select.addEventListener("blur", () => { + if (!select.value) { + select.style.color = "var(--descrip-text)"; + } + }); + + form.addEventListener("submit", async (e) => { + e.preventDefault(); + if (inputActive == null) { + error.textContent = "Please enter a username or select an existing user."; + } else if (inputActive) { + const username = input.value.trim(); + if (!username) { + error.textContent = "Please enter a username."; + return; + } + + // Create new user + input.disabled = select.disabled = input.readonly = select.readonly = true; + const spinner = createSpinner(); + button.prepend(spinner); + try { + const resp = await api.createUser(username); + if (resp.status >= 300) { + let message = "Error creating user: " + resp.status + " " + resp.statusText; + try { + const res = await resp.json(); + if(res.error) { + message = res.error; + } + } catch (error) { + } + throw new Error(message); + } + + resolve({ username, userId: await resp.json(), created: true }); + } catch (err) { + spinner.remove(); + error.textContent = err.message ?? err.statusText ?? err ?? "An unknown error occurred."; + input.disabled = select.disabled = input.readonly = select.readonly = false; + return; + } + } else if (!select.value) { + error.textContent = "Please select an existing user."; + return; + } else { + resolve({ username: users[select.value], userId: select.value, created: false }); + } + }); + + if (user) { + const name = localStorage["Comfy.userName"]; + if (name) { + input.value = name; + } + } + if (input.value) { + // Focus the input, do this separately as sometimes browsers like to fill in the value + input.focus(); + } + + const userIds = Object.keys(users ?? {}); + if (userIds.length) { + for (const u of userIds) { + $el("option", { textContent: users[u], value: u, parent: select }); + } + select.style.color = "var(--descrip-text)"; + + if (select.value) { + // Focus the select, do this separately as sometimes browsers like to fill in the value + select.focus(); + } + } else { + userSelection.classList.add("no-users"); + input.focus(); + } + }).then((r) => { + userSelection.remove(); + return r; + }); + } +} diff --git a/web/scripts/utils.js b/web/scripts/utils.js index 401aca9e487..01b98846218 100644 --- a/web/scripts/utils.js +++ b/web/scripts/utils.js @@ -1,3 +1,5 @@ +import { $el } from "./ui.js"; + // Simple date formatter const parts = { d: (d) => d.getDate(), @@ -65,3 +67,22 @@ export function applyTextReplacements(app, value) { return ((widget.value ?? 
"") + "").replaceAll(/\/|\\/g, "_"); }); } + +export async function addStylesheet(urlOrFile, relativeTo) { + return new Promise((res, rej) => { + let url; + if (urlOrFile.endsWith(".js")) { + url = urlOrFile.substr(0, urlOrFile.length - 2) + "css"; + } else { + url = new URL(urlOrFile, relativeTo ?? `${window.location.protocol}//${window.location.host}`).toString(); + } + $el("link", { + parent: document.head, + rel: "stylesheet", + type: "text/css", + href: url, + onload: res, + onerror: rej, + }); + }); +} diff --git a/web/style.css b/web/style.css index 630eea12e6d..c57fb7f735d 100644 --- a/web/style.css +++ b/web/style.css @@ -121,6 +121,7 @@ body { width: 100%; } +.comfy-btn, .comfy-menu > button, .comfy-menu-btns button, .comfy-menu .comfy-list button, @@ -133,6 +134,7 @@ body { margin-top: 2px; } +.comfy-btn:hover:not(:disabled), .comfy-menu > button:hover, .comfy-menu-btns button:hover, .comfy-menu .comfy-list button:hover, From 2d74fc436058c98e5182d2123700ae244ccbe037 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 8 Jan 2024 17:08:00 -0500 Subject: [PATCH 379/420] Fix issue with user manager parent dir not being created. --- app/user_manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/app/user_manager.py b/app/user_manager.py index 5a1031744e7..75485ee4239 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -63,7 +63,6 @@ def get_request_user_filepath(self, request, file, type="userdata", create_dir=T path = os.path.abspath(os.path.join(user_root, file)) if os.path.commonpath((user_root, path)) != user_root: return None - parent = os.path.join(path, os.pardir) if create_dir and not os.path.exists(parent): os.mkdir(parent) From b3b5ddb07a23b3d070df292c7a7fd6f83dc8fd50 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 8 Jan 2024 17:08:17 -0500 Subject: [PATCH 380/420] Support I mode images in LoadImageMask. --- nodes.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nodes.py b/nodes.py index 1ea0669b100..96d9e51a73f 100644 --- a/nodes.py +++ b/nodes.py @@ -1472,6 +1472,8 @@ def load_image(self, image, channel): i = Image.open(image_path) i = ImageOps.exif_transpose(i) if i.getbands() != ("R", "G", "B", "A"): + if i.mode == 'I': + i = i.point(lambda i: i * (1 / 255)) i = i.convert("RGBA") mask = None c = channel[0].upper() From 6a7bc35db845179a26e62534f3d4b789151e52fe Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 9 Jan 2024 13:46:52 -0500 Subject: [PATCH 381/420] Use basic attention implementation for small inputs on old pytorch. 
--- comfy/ldm/modules/attention.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 309240d5cc7..fd8888d0ee3 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -351,8 +351,11 @@ def attention_pytorch(q, k, v, heads, mask=None): optimized_attention_masked = optimized_attention def optimized_attention_for_device(device, mask=False, small_input=False): - if small_input and model_management.pytorch_attention_enabled(): - return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases + if small_input: + if model_management.pytorch_attention_enabled(): + return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases + else: + return attention_basic if device == torch.device("cpu"): return attention_sub_quad From 2c80d9acb9a542fef843954370390369093b6ac4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 9 Jan 2024 15:12:12 -0500 Subject: [PATCH 382/420] Round up to nearest power of 2 in SAG node to fix some resolution issues. --- comfy_extras/nodes_sag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index 450ac3eeacd..b307b4f5662 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -58,7 +58,7 @@ def create_blur_map(x0, attn, sigma=3.0, threshold=1.0): attn = attn.reshape(b, -1, hw1, hw2) # Global Average Pool mask = attn.mean(1, keepdim=False).sum(1, keepdim=False) > threshold - ratio = math.ceil(math.sqrt(lh * lw / hw1)) + ratio = 2**(math.ceil(math.sqrt(lh * lw / hw1)) - 1).bit_length() mid_shape = [math.ceil(lh / ratio), math.ceil(lw / ratio)] # Reshape From 1a57423d30da06283dd20a7aee2aadcb58c37e59 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 10 Jan 2024 04:00:49 -0500 Subject: [PATCH 383/420] Fix issue when using multiple t2i adapters with batched images. --- comfy/controlnet.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/comfy/controlnet.py b/comfy/controlnet.py index 8404054f38f..df474201c2c 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -126,7 +126,10 @@ def control_merge(self, control_input, control_output, control_prev, output_dtyp if o[i] is None: o[i] = prev_val else: - o[i] += prev_val + if o[i].shape[0] < prev_val.shape[0]: + o[i] = prev_val + o[i] + else: + o[i] += prev_val return out class ControlNet(ControlBase): From b4e915e74560bd2c090f9b4ed6b73b0781b7050e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 10 Jan 2024 04:08:43 -0500 Subject: [PATCH 384/420] Skip SAG when latent is too small. --- comfy_extras/nodes_sag.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy_extras/nodes_sag.py b/comfy_extras/nodes_sag.py index b307b4f5662..bbd3808078d 100644 --- a/comfy_extras/nodes_sag.py +++ b/comfy_extras/nodes_sag.py @@ -143,6 +143,8 @@ def post_cfg_function(args): sigma = args["sigma"] model_options = args["model_options"] x = args["input"] + if min(cfg_result.shape[2:]) <= 4: #skip when too small to add padding + return cfg_result # create the adversarially blurred image degraded = create_blur_map(uncond_pred, uncond_attn, sag_sigma, sag_threshold) From 10f2609fdd2231faf40346df11372601385b6cbb Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 11 Jan 2024 03:15:27 -0500 Subject: [PATCH 385/420] Add InpaintModelConditioning node. 
This is an alternative to VAE Encode for inpaint that should work with lower denoise. This is a different take on #2501 --- comfy/model_base.py | 26 ++++++++++++++++--- nodes.py | 62 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 82 insertions(+), 6 deletions(-) diff --git a/comfy/model_base.py b/comfy/model_base.py index f59526204cb..52c87ede684 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -100,11 +100,29 @@ def extra_conds(self, **kwargs): if self.inpaint_model: concat_keys = ("mask", "masked_image") cond_concat = [] - denoise_mask = kwargs.get("denoise_mask", None) - latent_image = kwargs.get("latent_image", None) + denoise_mask = kwargs.get("concat_mask", kwargs.get("denoise_mask", None)) + concat_latent_image = kwargs.get("concat_latent_image", None) + if concat_latent_image is None: + concat_latent_image = kwargs.get("latent_image", None) + else: + concat_latent_image = self.process_latent_in(concat_latent_image) + noise = kwargs.get("noise", None) device = kwargs["device"] + if concat_latent_image.shape[1:] != noise.shape[1:]: + concat_latent_image = utils.common_upscale(concat_latent_image, noise.shape[-1], noise.shape[-2], "bilinear", "center") + + concat_latent_image = utils.resize_to_batch_size(concat_latent_image, noise.shape[0]) + + if len(denoise_mask.shape) == len(noise.shape): + denoise_mask = denoise_mask[:,:1] + + denoise_mask = denoise_mask.reshape((-1, 1, denoise_mask.shape[-2], denoise_mask.shape[-1])) + if denoise_mask.shape[-2:] != noise.shape[-2:]: + denoise_mask = utils.common_upscale(denoise_mask, noise.shape[-1], noise.shape[-2], "bilinear", "center") + denoise_mask = utils.resize_to_batch_size(denoise_mask.round(), noise.shape[0]) + def blank_inpaint_image_like(latent_image): blank_image = torch.ones_like(latent_image) # these are the values for "zero" in pixel space translated to latent space @@ -117,9 +135,9 @@ def blank_inpaint_image_like(latent_image): for ck in concat_keys: if denoise_mask is not None: if ck == "mask": - cond_concat.append(denoise_mask[:,:1].to(device)) + cond_concat.append(denoise_mask.to(device)) elif ck == "masked_image": - cond_concat.append(latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space + cond_concat.append(concat_latent_image.to(device)) #NOTE: the latent_image should be masked by the mask in pixel space else: if ck == "mask": cond_concat.append(torch.ones_like(noise)[:,:1]) diff --git a/nodes.py b/nodes.py index 96d9e51a73f..6c7317b69b1 100644 --- a/nodes.py +++ b/nodes.py @@ -359,6 +359,62 @@ def encode(self, vae, pixels, mask, grow_mask_by=6): return ({"samples":t, "noise_mask": (mask_erosion[:,:,:x,:y].round())}, ) + +class InpaintModelConditioning: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "vae": ("VAE", ), + "pixels": ("IMAGE", ), + "mask": ("MASK", ), + }} + + RETURN_TYPES = ("CONDITIONING","CONDITIONING","LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + + CATEGORY = "conditioning/inpaint" + + def encode(self, positive, negative, pixels, vae, mask): + x = (pixels.shape[1] // 8) * 8 + y = (pixels.shape[2] // 8) * 8 + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(pixels.shape[1], pixels.shape[2]), mode="bilinear") + + orig_pixels = pixels + pixels = orig_pixels.clone() + if pixels.shape[1] != x or pixels.shape[2] != y: + x_offset = (pixels.shape[1] % 8) // 2 + y_offset = 
(pixels.shape[2] % 8) // 2 + pixels = pixels[:,x_offset:x + x_offset, y_offset:y + y_offset,:] + mask = mask[:,:,x_offset:x + x_offset, y_offset:y + y_offset] + + m = (1.0 - mask.round()).squeeze(1) + for i in range(3): + pixels[:,:,:,i] -= 0.5 + pixels[:,:,:,i] *= m + pixels[:,:,:,i] += 0.5 + concat_latent = vae.encode(pixels) + orig_latent = vae.encode(orig_pixels) + + out_latent = {} + + out_latent["samples"] = orig_latent + out_latent["noise_mask"] = mask + + out = [] + for conditioning in [positive, negative]: + c = [] + for t in conditioning: + d = t[1].copy() + d["concat_latent_image"] = concat_latent + d["concat_mask"] = mask + n = [t[0], d] + c.append(n) + out.append(c) + return (out[0], out[1], out_latent) + + class SaveLatent: def __init__(self): self.output_dir = folder_paths.get_output_directory() @@ -1628,10 +1684,11 @@ def INPUT_TYPES(s): def expand_image(self, image, left, top, right, bottom, feathering): d1, d2, d3, d4 = image.size() - new_image = torch.zeros( + new_image = torch.ones( (d1, d2 + top + bottom, d3 + left + right, d4), dtype=torch.float32, - ) + ) * 0.5 + new_image[:, top:top + d2, left:left + d3, :] = image mask = torch.ones( @@ -1723,6 +1780,7 @@ def expand_image(self, image, left, top, right, bottom, feathering): "unCLIPCheckpointLoader": unCLIPCheckpointLoader, "GLIGENLoader": GLIGENLoader, "GLIGENTextBoxApply": GLIGENTextBoxApply, + "InpaintModelConditioning": InpaintModelConditioning, "CheckpointLoader": CheckpointLoader, "DiffusersLoader": DiffusersLoader, From 977eda19a6471fbff253dc92c3c2f1a4a67b1793 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 11 Jan 2024 03:29:58 -0500 Subject: [PATCH 386/420] Don't round noise mask. --- comfy/sample.py | 1 - 1 file changed, 1 deletion(-) diff --git a/comfy/sample.py b/comfy/sample.py index 4b0d15c49d1..5c8a7d13039 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -28,7 +28,6 @@ def prepare_noise(latent_image, seed, noise_inds=None): def prepare_mask(noise_mask, shape, device): """ensures noise mask is of proper dimensions""" noise_mask = torch.nn.functional.interpolate(noise_mask.reshape((-1, 1, noise_mask.shape[-2], noise_mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear") - noise_mask = noise_mask.round() noise_mask = torch.cat([noise_mask] * shape[1], dim=1) noise_mask = comfy.utils.repeat_to_batch_size(noise_mask, shape[0]) noise_mask = noise_mask.to(device) From 4ab0392f707b26767eebb6e7f00e55a474732d63 Mon Sep 17 00:00:00 2001 From: TFWol <9045213+TFWol@users.noreply.github.com> Date: Thu, 11 Jan 2024 06:34:33 -0800 Subject: [PATCH 387/420] Resolved crashing nodes caused by `FileNotFoundError` during directory traversal - Implemented a `try-except` block in the `recursive_search` function to handle `FileNotFoundError` gracefully. - When encountering a file or directory path that cannot be accessed (causing `FileNotFoundError`), the code now logs a warning and skips processing for that specific path instead of crashing the node (CheckpointLoaderSimple was usually the first to break). This allows the rest of the directory traversal to proceed without interruption. 
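For context: the walk follows symlinks, so an entry can be classified as a
directory and then fail to stat a moment later (target removed mid-scan, a
dangling link, or an overlong path on Windows), at which point
os.path.getmtime() raises even though os.walk just yielded the name. A
minimal POSIX reproduction sketch (paths illustrative):

    import os
    import tempfile

    d = tempfile.mkdtemp()
    # Dangling symlink: the name exists, its target does not.
    os.symlink(os.path.join(d, "missing"), os.path.join(d, "broken"))
    os.path.getmtime(os.path.join(d, "broken"))  # raises FileNotFoundError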
--- folder_paths.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/folder_paths.py b/folder_paths.py index 641e5f0b26a..4abed783be0 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -138,15 +138,20 @@ def recursive_search(directory, excluded_dir_names=None): excluded_dir_names = [] result = [] - dirs = {directory: os.path.getmtime(directory)} + dirs = {} for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True): subdirs[:] = [d for d in subdirs if d not in excluded_dir_names] for file_name in filenames: relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory) result.append(relative_path) + for d in subdirs: path = os.path.join(dirpath, d) - dirs[path] = os.path.getmtime(path) + try: + dirs[path] = os.path.getmtime(path) + except FileNotFoundError: + print(f"Warning: Unable to access {path}. Skipping this path.") + continue return result, dirs def filter_files_extensions(files, extensions): From 1b3d65bd84c8026dea234643861491279886218c Mon Sep 17 00:00:00 2001 From: realazthat Date: Thu, 11 Jan 2024 08:38:18 -0500 Subject: [PATCH 388/420] Add error, status to /history endpoint --- execution.py | 70 +++++++++++++++++++++++++++++++++------------------- main.py | 7 +++++- 2 files changed, 51 insertions(+), 26 deletions(-) diff --git a/execution.py b/execution.py index 260a08970c0..554e8dc12b9 100644 --- a/execution.py +++ b/execution.py @@ -8,6 +8,7 @@ import traceback import gc import inspect +from typing import List, Literal, NamedTuple, Optional import torch import nodes @@ -275,8 +276,15 @@ def reset(self): self.outputs = {} self.object_storage = {} self.outputs_ui = {} + self.status_notes = [] + self.success = True self.old_prompt = {} + def add_note(self, event, data, broadcast: bool): + self.status_notes.append((event, data)) + if self.server.client_id is not None or broadcast: + self.server.send_sync(event, data, self.server.client_id) + def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, error, ex): node_id = error["node_id"] class_type = prompt[node_id]["class_type"] @@ -290,23 +298,22 @@ def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, e "node_type": class_type, "executed": list(executed), } - self.server.send_sync("execution_interrupted", mes, self.server.client_id) + self.add_note("execution_interrupted", mes, broadcast=True) else: - if self.server.client_id is not None: - mes = { - "prompt_id": prompt_id, - "node_id": node_id, - "node_type": class_type, - "executed": list(executed), - - "exception_message": error["exception_message"], - "exception_type": error["exception_type"], - "traceback": error["traceback"], - "current_inputs": error["current_inputs"], - "current_outputs": error["current_outputs"], - } - self.server.send_sync("execution_error", mes, self.server.client_id) + mes = { + "prompt_id": prompt_id, + "node_id": node_id, + "node_type": class_type, + "executed": list(executed), + "exception_message": error["exception_message"], + "exception_type": error["exception_type"], + "traceback": error["traceback"], + "current_inputs": error["current_inputs"], + "current_outputs": error["current_outputs"], + } + self.add_note("execution_error", mes, broadcast=False) + # Next, remove the subsequent outputs since they will not be executed to_delete = [] for o in self.outputs: @@ -327,8 +334,7 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): else: self.server.client_id = None - if self.server.client_id is not None: - 
self.server.send_sync("execution_start", { "prompt_id": prompt_id}, self.server.client_id) + self.add_note("execution_start", { "prompt_id": prompt_id}, broadcast=False) with torch.inference_mode(): #delete cached outputs if nodes don't exist for them @@ -361,8 +367,9 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): del d comfy.model_management.cleanup_models() - if self.server.client_id is not None: - self.server.send_sync("execution_cached", { "nodes": list(current_outputs) , "prompt_id": prompt_id}, self.server.client_id) + self.add_note("execution_cached", + { "nodes": list(current_outputs) , "prompt_id": prompt_id}, + broadcast=False) executed = set() output_node_id = None to_execute = [] @@ -378,8 +385,8 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): # This call shouldn't raise anything if there's an error deep in # the actual SD code, instead it will report the node where the # error was raised - success, error, ex = recursive_execute(self.server, prompt, self.outputs, output_node_id, extra_data, executed, prompt_id, self.outputs_ui, self.object_storage) - if success is not True: + self.success, error, ex = recursive_execute(self.server, prompt, self.outputs, output_node_id, extra_data, executed, prompt_id, self.outputs_ui, self.object_storage) + if self.success is not True: self.handle_execution_error(prompt_id, prompt, current_outputs, executed, error, ex) break @@ -731,14 +738,27 @@ def get(self, timeout=None): self.server.queue_updated() return (item, i) - def task_done(self, item_id, outputs): + class ExecutionStatus(NamedTuple): + status_str: Literal['success', 'error'] + completed: bool + notes: List[str] + + def task_done(self, item_id, outputs, + status: Optional['PromptQueue.ExecutionStatus']): with self.mutex: prompt = self.currently_running.pop(item_id) if len(self.history) > MAXIMUM_HISTORY_SIZE: self.history.pop(next(iter(self.history))) - self.history[prompt[1]] = { "prompt": prompt, "outputs": {} } - for o in outputs: - self.history[prompt[1]]["outputs"][o] = outputs[o] + + status_dict: dict|None = None + if status is not None: + status_dict = copy.deepcopy(status._asdict()) + + self.history[prompt[1]] = { + "prompt": prompt, + "outputs": copy.deepcopy(outputs), + 'status': status_dict, + } self.server.queue_updated() def get_current_queue(self): diff --git a/main.py b/main.py index 45d5d41b3b8..a40ad2a4c2c 100644 --- a/main.py +++ b/main.py @@ -110,7 +110,12 @@ def prompt_worker(q, server): e.execute(item[2], prompt_id, item[3], item[4]) need_gc = True - q.task_done(item_id, e.outputs_ui) + q.task_done(item_id, + e.outputs_ui, + status=execution.PromptQueue.ExecutionStatus( + status_str='success' if e.success else 'error', + completed=e.success, + notes=e.status_notes)) if server.client_id is not None: server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) From d4edd9bfa8c227175c668bb83f2c80cef43fc53e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 11 Jan 2024 15:13:38 -0500 Subject: [PATCH 389/420] Fix hypertile issue with high depths. 
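In other words: qualifying depths are now identified by attention sequence
length derived from the latent's original spatial shape, rather than by
channel width, and the tile factor doubles per depth. A worked sketch with
illustrative numbers (a 64x64 latent, i.e. a 512x512 image, max_depth=3):

    orig_h = orig_w = 64
    max_depth = 3
    # One qualifying token count per UNet depth:
    apply_to = [(orig_h / 2**i) * (orig_w / 2**i) for i in range(max_depth + 1)]
    print(apply_to)  # [4096.0, 1024.0, 256.0, 64.0]

    # q.shape[-2] (the sequence length) is matched against this list; with
    # scale_depth the factor is 2 ** depth instead of a channel-width ratio:
    tokens = 1024.0
    factor = 2 ** apply_to.index(tokens)  # depth 1 -> factor 2

Spatial token counts are unique per depth, whereas at high depths several
UNet levels can share the same channel width, which is presumably what went
wrong before.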
--- comfy_extras/nodes_hypertile.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py index e7446b2e540..544ae1b22e7 100644 --- a/comfy_extras/nodes_hypertile.py +++ b/comfy_extras/nodes_hypertile.py @@ -37,24 +37,24 @@ def INPUT_TYPES(s): def patch(self, model, tile_size, swap_size, max_depth, scale_depth): model_channels = model.model.model_config.unet_config["model_channels"] - apply_to = set() - temp = model_channels - for x in range(max_depth + 1): - apply_to.add(temp) - temp *= 2 - latent_tile_size = max(32, tile_size) // 8 self.temp = None def hypertile_in(q, k, v, extra_options): - if q.shape[-1] in apply_to: + model_chans = q.shape[-2] + orig_shape = extra_options['original_shape'] + apply_to = [] + for i in range(max_depth + 1): + apply_to.append((orig_shape[-2] / (2 ** i)) * (orig_shape[-1] / (2 ** i))) + + if model_chans in apply_to: shape = extra_options["original_shape"] aspect_ratio = shape[-1] / shape[-2] hw = q.size(1) h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio)) - factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1 + factor = (2 ** apply_to.index(model_chans)) if scale_depth else 1 nh = random_divisor(h, latent_tile_size * factor, swap_size) nw = random_divisor(w, latent_tile_size * factor, swap_size) From 53c8a99e6c00b5e20425100f6680cd9ea2652218 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 11 Jan 2024 17:21:40 -0500 Subject: [PATCH 390/420] Make server storage the default. Remove --server-storage argument. --- app/user_manager.py | 2 +- comfy/cli_args.py | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/app/user_manager.py b/app/user_manager.py index 75485ee4239..209094af15a 100644 --- a/app/user_manager.py +++ b/app/user_manager.py @@ -94,7 +94,7 @@ async def get_users(request): else: user_dir = self.get_request_user_filepath(request, None, create_dir=False) return web.json_response({ - "storage": "server" if args.server_storage else "browser", + "storage": "server", "migrated": os.path.exists(user_dir) }) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index c02bbf2ba7b..b4bbfbfab53 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -112,8 +112,7 @@ class LatentPreviewMethod(enum.Enum): parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.") -parser.add_argument("--server-storage", action="store_true", help="Saves settings and other user configuration on the server instead of in browser storage.") -parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage. If enabled, server-storage will be unconditionally enabled.") +parser.add_argument("--multi-user", action="store_true", help="Enables per-user storage.") if comfy.options.args_parsing: args = parser.parse_args() @@ -125,6 +124,3 @@ class LatentPreviewMethod(enum.Enum): if args.disable_auto_launch: args.auto_launch = False - -if args.multi_user: - args.server_storage = True \ No newline at end of file From bcc0bde2afa7028939af737b46533b465baeb22d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 12 Jan 2024 17:21:22 -0500 Subject: [PATCH 391/420] Clear status notes on execution start. 
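Rationale: the executor object is reused across queue items and task_done()
snapshots the accumulated notes into the history entry, so without this
reset a prompt's history would also pick up the notes recorded for the
previous prompt. Roughly (illustrative, names as in main.py):

    e.execute(prompt_a, id_a, extra, outputs)  # e.status_notes: notes for id_a
    e.execute(prompt_b, id_b, extra, outputs)  # before this fix, id_a's notes
                                               # were still in the list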
--- execution.py | 1 + 1 file changed, 1 insertion(+) diff --git a/execution.py b/execution.py index 554e8dc12b9..752e1d5aa53 100644 --- a/execution.py +++ b/execution.py @@ -334,6 +334,7 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): else: self.server.client_id = None + self.status_notes = [] self.add_note("execution_start", { "prompt_id": prompt_id}, broadcast=False) with torch.inference_mode(): From 56d9496b18baa5946834d1982908df0091e1c925 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 12 Jan 2024 18:17:06 -0500 Subject: [PATCH 392/420] Rename status notes to status messages. I think message describes them better. --- execution.py | 18 +++++++++--------- main.py | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/execution.py b/execution.py index 752e1d5aa53..e91e9a410e0 100644 --- a/execution.py +++ b/execution.py @@ -276,12 +276,12 @@ def reset(self): self.outputs = {} self.object_storage = {} self.outputs_ui = {} - self.status_notes = [] + self.status_messages = [] self.success = True self.old_prompt = {} - def add_note(self, event, data, broadcast: bool): - self.status_notes.append((event, data)) + def add_message(self, event, data, broadcast: bool): + self.status_messages.append((event, data)) if self.server.client_id is not None or broadcast: self.server.send_sync(event, data, self.server.client_id) @@ -298,7 +298,7 @@ def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, e "node_type": class_type, "executed": list(executed), } - self.add_note("execution_interrupted", mes, broadcast=True) + self.add_message("execution_interrupted", mes, broadcast=True) else: mes = { "prompt_id": prompt_id, @@ -312,7 +312,7 @@ def handle_execution_error(self, prompt_id, prompt, current_outputs, executed, e "current_inputs": error["current_inputs"], "current_outputs": error["current_outputs"], } - self.add_note("execution_error", mes, broadcast=False) + self.add_message("execution_error", mes, broadcast=False) # Next, remove the subsequent outputs since they will not be executed to_delete = [] @@ -334,8 +334,8 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): else: self.server.client_id = None - self.status_notes = [] - self.add_note("execution_start", { "prompt_id": prompt_id}, broadcast=False) + self.status_messages = [] + self.add_message("execution_start", { "prompt_id": prompt_id}, broadcast=False) with torch.inference_mode(): #delete cached outputs if nodes don't exist for them @@ -368,7 +368,7 @@ def execute(self, prompt, prompt_id, extra_data={}, execute_outputs=[]): del d comfy.model_management.cleanup_models() - self.add_note("execution_cached", + self.add_message("execution_cached", { "nodes": list(current_outputs) , "prompt_id": prompt_id}, broadcast=False) executed = set() @@ -742,7 +742,7 @@ def get(self, timeout=None): class ExecutionStatus(NamedTuple): status_str: Literal['success', 'error'] completed: bool - notes: List[str] + messages: List[str] def task_done(self, item_id, outputs, status: Optional['PromptQueue.ExecutionStatus']): diff --git a/main.py b/main.py index a40ad2a4c2c..69d9bce6cb7 100644 --- a/main.py +++ b/main.py @@ -115,7 +115,7 @@ def prompt_worker(q, server): status=execution.PromptQueue.ExecutionStatus( status_str='success' if e.success else 'error', completed=e.success, - notes=e.status_notes)) + messages=e.status_messages)) if server.client_id is not None: server.send_sync("executing", { "node": None, "prompt_id": prompt_id }, server.client_id) From 
df49a727ff0c786adf316b17632124076e847e16 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 13 Jan 2024 17:00:30 +0000 Subject: [PATCH 393/420] Fix modifiers triggering key down checks --- web/extensions/core/undoRedo.js | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js index 3cb137520f4..ff976c74c3e 100644 --- a/web/extensions/core/undoRedo.js +++ b/web/extensions/core/undoRedo.js @@ -106,6 +106,7 @@ const bindInput = (activeEl) => { } }; +let keyIgnored = false; window.addEventListener( "keydown", (e) => { @@ -116,6 +117,9 @@ window.addEventListener( return; } + keyIgnored = e.key === "Control" || e.key === "Shift" || e.key === "Alt" || e.key === "Meta"; + if (keyIgnored) return; + // Check if this is a ctrl+z ctrl+y if (await undoRedo(e)) return; @@ -127,6 +131,13 @@ window.addEventListener( true ); +window.addEventListener("keyup", (e) => { + if (keyIgnored) { + keyIgnored = false; + checkState(); + } +}); + // Handle clicking DOM elements (e.g. widgets) window.addEventListener("mouseup", () => { checkState(); From 32034217ae14bb0dac3108b1e4e8454f036a9b2b Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 13 Jan 2024 18:57:47 +0000 Subject: [PATCH 394/420] add setting to change control after generate to run before --- web/scripts/app.js | 11 +++++++- web/scripts/widgets.js | 64 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 73 insertions(+), 2 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index 72f9e86038f..d131045d7ab 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -1,5 +1,5 @@ import { ComfyLogging } from "./logging.js"; -import { ComfyWidgets } from "./widgets.js"; +import { ComfyWidgets, initWidgets } from "./widgets.js"; import { ComfyUI, $el } from "./ui.js"; import { api } from "./api.js"; import { defaultGraph } from "./defaultGraph.js"; @@ -1420,6 +1420,7 @@ export class ComfyApp { await this.#invokeExtensionsAsync("init"); await this.registerNodes(); + initWidgets(this); // Load previous workflow let restored = false; @@ -1774,6 +1775,14 @@ export class ComfyApp { */ async graphToPrompt() { for (const outerNode of this.graph.computeExecutionOrder(false)) { + if (outerNode.widgets) { + for (const widget of outerNode.widgets) { + // Allow widgets to run callbacks before a prompt has been queued + // e.g. random seed before every gen + widget.beforeQueued?.(); + } + } + const innerNodes = outerNode.getInnerNodes ? outerNode.getInnerNodes() : [outerNode]; for (const node of innerNodes) { if (node.isVirtualNode) { diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index e2e21164db8..36429266a25 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -1,6 +1,19 @@ import { api } from "./api.js" import "./domWidget.js"; +let controlValueRunBefore = false; +function updateControlWidgetLabel(widget) { + let replacement = "after"; + let find = "before"; + if (controlValueRunBefore) { + [find, replacement] = [replacement, find] + } + widget.label = (widget.label ?? 
widget.name).replace(find, replacement); +} + +const IS_CONTROL_WIDGET = Symbol(); +const HAS_EXECUTED = Symbol(); + function getNumberDefaults(inputData, defaultStep, precision, enable_rounding) { let defaultVal = inputData[1]["default"]; let { min, max, step, round} = inputData[1]; @@ -62,6 +75,8 @@ export function addValueControlWidgets(node, targetWidget, defaultValue = "rando serialize: false, // Don't include this in prompt. } ); + valueControl[IS_CONTROL_WIDGET] = true; + updateControlWidgetLabel(valueControl); widgets.push(valueControl); const isCombo = targetWidget.type === "combo"; @@ -76,10 +91,12 @@ export function addValueControlWidgets(node, targetWidget, defaultValue = "rando serialize: false, // Don't include this in prompt. } ); + updateControlWidgetLabel(comboFilter); + widgets.push(comboFilter); } - valueControl.afterQueued = () => { + const applyWidgetControl = () => { var v = valueControl.value; if (isCombo && v !== "fixed") { @@ -159,6 +176,23 @@ export function addValueControlWidgets(node, targetWidget, defaultValue = "rando targetWidget.callback(targetWidget.value); } }; + + valueControl.beforeQueued = () => { + if (controlValueRunBefore) { + // Don't run on first execution + if (valueControl[HAS_EXECUTED]) { + applyWidgetControl(); + } + } + valueControl[HAS_EXECUTED] = true; + }; + + valueControl.afterQueued = () => { + if (!controlValueRunBefore) { + applyWidgetControl(); + } + }; + return widgets; }; @@ -224,6 +258,34 @@ function isSlider(display, app) { return (display==="slider") ? "slider" : "number" } +export function initWidgets(app) { + app.ui.settings.addSetting({ + id: "Comfy.WidgetControlMode", + name: "Widget Value Control Mode", + type: "combo", + defaultValue: "after", + options: ["before", "after"], + tooltip: "Controls when widget values are updated (randomize/increment/decrement), either before the prompt is queued or after.", + onChange(value) { + controlValueRunBefore = value === "before"; + for (const n of app.graph._nodes) { + if (!n.widgets) continue; + for (const w of n.widgets) { + if (w[IS_CONTROL_WIDGET]) { + updateControlWidgetLabel(w); + if (w.linkedWidgets) { + for (const l of w.linkedWidgets) { + updateControlWidgetLabel(l); + } + } + } + } + } + app.graph.setDirtyCanvas(true); + }, + }); +} + export const ComfyWidgets = { "INT:seed": seedWidget, "INT:noise_seed": seedWidget, From 8e916735c01a238251eb1dc35e9eef59284f0ca8 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 13 Jan 2024 18:57:59 +0000 Subject: [PATCH 395/420] export function --- web/scripts/widgets.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/widgets.js b/web/scripts/widgets.js index 36429266a25..0529b1d80b5 100644 --- a/web/scripts/widgets.js +++ b/web/scripts/widgets.js @@ -2,7 +2,7 @@ import { api } from "./api.js" import "./domWidget.js"; let controlValueRunBefore = false; -function updateControlWidgetLabel(widget) { +export function updateControlWidgetLabel(widget) { let replacement = "after"; let find = "before"; if (controlValueRunBefore) { From 18511dd581cf8da701cc60af07bb947fa4a79656 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 13 Jan 2024 20:43:20 +0000 Subject: [PATCH 396/420] Manage group nodes (#2455) * wip group manage * prototyping ui * tweaks * wip * wip * more wip * fixes add deletion * Fix tests * fixes * Remove test code * typo * fix crash when link is invalid --- web/extensions/core/groupNode.js | 161 
++++++--- web/extensions/core/groupNodeManage.css | 149 +++++++++ web/extensions/core/groupNodeManage.js | 422 ++++++++++++++++++++++++ web/jsconfig.json | 3 +- web/scripts/domWidget.js | 3 +- web/scripts/ui.js | 20 +- web/scripts/ui/draggableList.js | 287 ++++++++++++++++ web/style.css | 11 +- 8 files changed, 1007 insertions(+), 49 deletions(-) create mode 100644 web/extensions/core/groupNodeManage.css create mode 100644 web/extensions/core/groupNodeManage.js create mode 100644 web/scripts/ui/draggableList.js diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 4cf1f7621b9..7feb2becc16 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -1,6 +1,7 @@ import { app } from "../../scripts/app.js"; import { api } from "../../scripts/api.js"; import { mergeIfValid } from "./widgetInputs.js"; +import { ManageGroupDialog } from "./groupNodeManage.js"; const GROUP = Symbol(); @@ -61,11 +62,7 @@ class GroupNodeBuilder { ); return; case Workflow.InUse.Registered: - if ( - !confirm( - "An group node with this name already exists embedded in this workflow, are you sure you want to overwrite it?" - ) - ) { + if (!confirm("A group node with this name already exists embedded in this workflow, are you sure you want to overwrite it?")) { return; } break; @@ -151,6 +148,8 @@ export class GroupNodeConfig { this.primitiveDefs = {}; this.widgetToPrimitive = {}; this.primitiveToWidget = {}; + this.nodeInputs = {}; + this.outputVisibility = []; } async registerType(source = "workflow") { @@ -158,6 +157,7 @@ export class GroupNodeConfig { output: [], output_name: [], output_is_list: [], + output_is_hidden: [], name: source + "/" + this.name, display_name: this.name, category: "group nodes" + ("/" + source), @@ -277,8 +277,7 @@ export class GroupNodeConfig { } if (input.widget) { const targetDef = globalDefs[node.type]; - const targetWidget = - targetDef.input.required[input.widget.name] ?? targetDef.input.optional[input.widget.name]; + const targetWidget = targetDef.input.required[input.widget.name] ?? targetDef.input.optional[input.widget.name]; const widget = [targetWidget[0], config]; const res = mergeIfValid( @@ -330,7 +329,8 @@ export class GroupNodeConfig { } getInputConfig(node, inputName, seenInputs, config, extra) { - let name = node.inputs?.find((inp) => inp.name === inputName)?.label ?? inputName; + const customConfig = this.nodeData.config?.[node.index]?.input?.[inputName]; + let name = customConfig?.name ?? node.inputs?.find((inp) => inp.name === inputName)?.label ?? 
inputName; let key = name; let prefix = ""; // Special handling for primitive to include the title if it is set rather than just "value" @@ -356,7 +356,7 @@ export class GroupNodeConfig { config = [config[0], { ...config[1], ...extra }]; } - return { name, config }; + return { name, config, customConfig }; } processWidgetInputs(inputs, node, inputNames, seenInputs) { @@ -366,9 +366,7 @@ export class GroupNodeConfig { for (const inputName of inputNames) { let widgetType = app.getWidgetType(inputs[inputName], inputName); if (widgetType) { - const convertedIndex = node.inputs?.findIndex( - (inp) => inp.name === inputName && inp.widget?.name === inputName - ); + const convertedIndex = node.inputs?.findIndex((inp) => inp.name === inputName && inp.widget?.name === inputName); if (convertedIndex > -1) { // This widget has been converted to a widget // We need to store this in the correct position so link ids line up @@ -424,6 +422,7 @@ export class GroupNodeConfig { } processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs) { + this.nodeInputs[node.index] = {}; for (let i = 0; i < slots.length; i++) { const inputName = slots[i]; if (linksTo[i]) { @@ -432,7 +431,11 @@ export class GroupNodeConfig { continue; } - const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]); + const { name, config, customConfig } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName]); + + this.nodeInputs[node.index][inputName] = name; + if(customConfig?.visible === false) continue; + this.nodeDef.input.required[name] = config; inputMap[i] = this.inputCount++; } @@ -452,6 +455,7 @@ export class GroupNodeConfig { const { name, config } = this.getInputConfig(node, inputName, seenInputs, inputs[inputName], { defaultInput: true, }); + this.nodeDef.input.required[name] = config; this.newToOldWidgetMap[name] = { node, inputName }; @@ -477,9 +481,7 @@ export class GroupNodeConfig { this.processInputSlots(inputs, node, slots, linksTo, inputMap, seenInputs); // Converted inputs have to be processed after all other nodes as they'll be at the end of the list - this.#convertedToProcess.push(() => - this.processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs) - ); + this.#convertedToProcess.push(() => this.processConvertedWidgets(inputs, node, slots, converted, linksTo, inputMap, seenInputs)); return inputMapping; } @@ -490,8 +492,12 @@ export class GroupNodeConfig { // Add outputs for (let outputId = 0; outputId < def.output.length; outputId++) { const linksFrom = this.linksFrom[node.index]; - if (linksFrom?.[outputId] && !this.externalFrom[node.index]?.[outputId]) { - // This output is linked internally so we can skip it + // If this output is linked internally we flag it to hide + const hasLink = linksFrom?.[outputId] && !this.externalFrom[node.index]?.[outputId]; + const customConfig = this.nodeData.config?.[node.index]?.output?.[outputId]; + const visible = customConfig?.visible ?? !hasLink; + this.outputVisibility.push(visible); + if (!visible) { continue; } @@ -500,11 +506,15 @@ export class GroupNodeConfig { this.nodeDef.output.push(def.output[outputId]); this.nodeDef.output_is_list.push(def.output_is_list[outputId]); - let label = def.output_name?.[outputId] ?? def.output[outputId]; - const output = node.outputs.find((o) => o.name === label); - if (output?.label) { - label = output.label; + let label = customConfig?.name; + if (!label) { + label = def.output_name?.[outputId] ?? 
def.output[outputId]; + const output = node.outputs.find((o) => o.name === label); + if (output?.label) { + label = output.label; + } } + let name = label; if (name in seenOutputs) { const prefix = `${node.title ?? node.type} `; @@ -677,6 +687,25 @@ export class GroupNodeHandler { return this.innerNodes; }; + this.node.recreate = async () => { + const id = this.node.id; + const sz = this.node.size; + const nodes = this.node.convertToNodes(); + + const groupNode = LiteGraph.createNode(this.node.type); + groupNode.id = id; + + // Reuse the existing nodes for this instance + groupNode.setInnerNodes(nodes); + groupNode[GROUP].populateWidgets(); + app.graph.add(groupNode); + groupNode.size = [Math.max(groupNode.size[0], sz[0]), Math.max(groupNode.size[1], sz[1])]; + + // Remove all converted nodes and relink them + groupNode[GROUP].replaceNodes(nodes); + return groupNode; + }; + this.node.convertToNodes = () => { const addInnerNodes = () => { const backup = localStorage.getItem("litegrapheditor_clipboard"); @@ -769,6 +798,7 @@ export class GroupNodeHandler { const slot = node.inputs[groupSlotId]; if (slot.link == null) continue; const link = app.graph.links[slot.link]; + if (!link) continue; // connect this node output to the input of another node const originNode = app.graph.getNodeById(link.origin_id); originNode.connect(link.origin_slot, newNode, +innerInputId); @@ -806,12 +836,23 @@ export class GroupNodeHandler { let optionIndex = options.findIndex((o) => o.content === "Outputs"); if (optionIndex === -1) optionIndex = options.length; else optionIndex++; - options.splice(optionIndex, 0, null, { - content: "Convert to nodes", - callback: () => { - return this.convertToNodes(); + options.splice( + optionIndex, + 0, + null, + { + content: "Convert to nodes", + callback: () => { + return this.convertToNodes(); + }, }, - }); + { + content: "Manage Group Node", + callback: () => { + new ManageGroupDialog(app).show(this.type); + }, + } + ); }; // Draw custom collapse icon to identity this as a group @@ -865,6 +906,28 @@ export class GroupNodeHandler { return onExecutionStart?.apply(this, arguments); }; + const self = this; + const onNodeCreated = this.node.onNodeCreated; + this.node.onNodeCreated = function () { + const config = self.groupData.nodeData.config; + if (config) { + for (const n in config) { + const inputs = config[n]?.input; + for (const w in inputs) { + if (inputs[w].visible !== false) continue; + const widgetName = self.groupData.oldToNewWidgetMap[n][w]; + const widget = this.widgets.find((w) => w.name === widgetName); + if (widget) { + widget.type = "hidden"; + widget.computeSize = () => [0, -4]; + } + } + } + } + + return onNodeCreated?.apply(this, arguments); + }; + function handleEvent(type, getId, getEvent) { const handler = ({ detail }) => { const id = getId(detail); @@ -927,13 +990,15 @@ export class GroupNodeHandler { continue; } else if (innerNode.type === "Reroute") { const rerouteLinks = this.groupData.linksFrom[old.node.index]; - for (const [_, , targetNodeId, targetSlot] of rerouteLinks["0"]) { - const node = this.innerNodes[targetNodeId]; - const input = node.inputs[targetSlot]; - if (input.widget) { - const widget = node.widgets?.find((w) => w.name === input.widget.name); - if (widget) { - widget.value = newValue; + if (rerouteLinks) { + for (const [_, , targetNodeId, targetSlot] of rerouteLinks["0"]) { + const node = this.innerNodes[targetNodeId]; + const input = node.inputs[targetSlot]; + if (input.widget) { + const widget = node.widgets?.find((w) => w.name === 
input.widget.name); + if (widget) { + widget.value = newValue; + } } } } @@ -975,7 +1040,7 @@ export class GroupNodeHandler { const [, , targetNodeId, targetNodeSlot] = link; const targetNode = this.groupData.nodeData.nodes[targetNodeId]; const inputs = targetNode.inputs; - const targetWidget = inputs?.[targetNodeSlot].widget; + const targetWidget = inputs?.[targetNodeSlot]?.widget; if (!targetWidget) return; const offset = inputs.length - (targetNode.widgets_values?.length ?? 0); @@ -983,13 +1048,12 @@ export class GroupNodeHandler { if (v == null) return; const widgetName = Object.values(map)[0]; - const widget = this.node.widgets.find(w => w.name === widgetName); - if(widget) { + const widget = this.node.widgets.find((w) => w.name === widgetName); + if (widget) { widget.value = v; } } - populateWidgets() { if (!this.node.widgets) return; @@ -1080,7 +1144,7 @@ export class GroupNodeHandler { } static getGroupData(node) { - return node.constructor?.nodeData?.[GROUP]; + return (node.nodeData ?? node.constructor?.nodeData)?.[GROUP]; } static isGroupNode(node) { @@ -1112,7 +1176,7 @@ export class GroupNodeHandler { } function addConvertToGroupOptions() { - function addOption(options, index) { + function addConvertOption(options, index) { const selected = Object.values(app.canvas.selected_nodes ?? {}); const disabled = selected.length < 2 || selected.find((n) => GroupNodeHandler.isGroupNode(n)); options.splice(index + 1, null, { @@ -1124,12 +1188,25 @@ function addConvertToGroupOptions() { }); } + function addManageOption(options, index) { + const groups = app.graph.extra?.groupNodes; + const disabled = !groups || !Object.keys(groups).length; + options.splice(index + 1, null, { + content: `Manage Group Nodes`, + disabled, + callback: () => { + new ManageGroupDialog(app).show(); + }, + }); + } + // Add to canvas const getCanvasMenuOptions = LGraphCanvas.prototype.getCanvasMenuOptions; LGraphCanvas.prototype.getCanvasMenuOptions = function () { const options = getCanvasMenuOptions.apply(this, arguments); const index = options.findIndex((o) => o?.content === "Add Group") + 1 || options.length; - addOption(options, index); + addConvertOption(options, index); + addManageOption(options, index + 1); return options; }; @@ -1139,7 +1216,7 @@ function addConvertToGroupOptions() { const options = getNodeMenuOptions.apply(this, arguments); if (!GroupNodeHandler.isGroupNode(node)) { const index = options.findIndex((o) => o?.content === "Outputs") + 1 || options.length - 1; - addOption(options, index); + addConvertOption(options, index); } return options; }; diff --git a/web/extensions/core/groupNodeManage.css b/web/extensions/core/groupNodeManage.css new file mode 100644 index 00000000000..5ac89aee31b --- /dev/null +++ b/web/extensions/core/groupNodeManage.css @@ -0,0 +1,149 @@ +.comfy-group-manage { + background: var(--bg-color); + color: var(--fg-color); + padding: 0; + font-family: Arial, Helvetica, sans-serif; + border-color: black; + margin: 20vh auto; + max-height: 60vh; +} +.comfy-group-manage-outer { + max-height: 60vh; + min-width: 500px; + display: flex; + flex-direction: column; +} +.comfy-group-manage-outer > header { + display: flex; + align-items: center; + gap: 10px; + justify-content: space-between; + background: var(--comfy-menu-bg); + padding: 15px 20px; +} +.comfy-group-manage-outer > header select { + background: var(--comfy-input-bg); + border: 1px solid var(--border-color); + color: var(--input-text); + padding: 5px 10px; + border-radius: 5px; +} +.comfy-group-manage h2 { + margin: 
0; + font-weight: normal; +} +.comfy-group-manage main { + display: flex; + overflow: hidden; +} +.comfy-group-manage .drag-handle { + font-weight: bold; +} +.comfy-group-manage-list { + border-right: 1px solid var(--comfy-menu-bg); +} +.comfy-group-manage-list ul { + margin: 40px 0 0; + padding: 0; + list-style: none; +} +.comfy-group-manage-list-items { + max-height: 70vh; + overflow-y: scroll; + overflow-x: hidden; +} +.comfy-group-manage-list li { + display: flex; + padding: 10px 20px 10px 10px; + cursor: pointer; + align-items: center; + gap: 5px; +} +.comfy-group-manage-list div { + display: flex; + flex-direction: column; +} +.comfy-group-manage-list li:not(.selected):hover div { + text-decoration: underline; +} +.comfy-group-manage-list li.selected { + background: var(--border-color); +} +.comfy-group-manage-list li span { + opacity: 0.7; + font-size: smaller; +} +.comfy-group-manage-node { + flex: auto; + background: var(--border-color); + display: flex; + flex-direction: column; +} +.comfy-group-manage-node > div { + overflow: auto; +} +.comfy-group-manage-node header { + display: flex; + background: var(--bg-color); + height: 40px; +} +.comfy-group-manage-node header a { + text-align: center; + flex: auto; + border-right: 1px solid var(--comfy-menu-bg); + border-bottom: 1px solid var(--comfy-menu-bg); + padding: 10px; + cursor: pointer; + font-size: 15px; +} +.comfy-group-manage-node header a:last-child { + border-right: none; +} +.comfy-group-manage-node header a:not(.active):hover { + text-decoration: underline; +} +.comfy-group-manage-node header a.active { + background: var(--border-color); + border-bottom: none; +} +.comfy-group-manage-node-page { + display: none; + overflow: auto; +} +.comfy-group-manage-node-page.active { + display: block; +} +.comfy-group-manage-node-page div { + padding: 10px; + display: flex; + align-items: center; + gap: 10px; +} +.comfy-group-manage-node-page input { + border: none; + color: var(--input-text); + background: var(--comfy-input-bg); + padding: 5px 10px; +} +.comfy-group-manage-node-page input[type="text"] { + flex: auto; +} +.comfy-group-manage-node-page label { + display: flex; + gap: 5px; + align-items: center; +} +.comfy-group-manage footer { + border-top: 1px solid var(--comfy-menu-bg); + padding: 10px; + display: flex; + gap: 10px; +} +.comfy-group-manage footer button { + font-size: 14px; + padding: 5px 10px; + border-radius: 0; +} +.comfy-group-manage footer button:first-child { + margin-right: auto; +} diff --git a/web/extensions/core/groupNodeManage.js b/web/extensions/core/groupNodeManage.js new file mode 100644 index 00000000000..1ab33838688 --- /dev/null +++ b/web/extensions/core/groupNodeManage.js @@ -0,0 +1,422 @@ +import { $el, ComfyDialog } from "../../scripts/ui.js"; +import { DraggableList } from "../../scripts/ui/draggableList.js"; +import { addStylesheet } from "../../scripts/utils.js"; +import { GroupNodeConfig, GroupNodeHandler } from "./groupNode.js"; + +addStylesheet(import.meta.url); + +const ORDER = Symbol(); + +function merge(target, source) { + if (typeof target === "object" && typeof source === "object") { + for (const key in source) { + const sv = source[key]; + if (typeof sv === "object") { + let tv = target[key]; + if (!tv) tv = target[key] = {}; + merge(tv, source[key]); + } else { + target[key] = sv; + } + } + } + + return target; +} + +export class ManageGroupDialog extends ComfyDialog { + /** @type { Record<"Inputs" | "Outputs" | "Widgets", {tab: HTMLAnchorElement, page: HTMLElement}> } */ + tabs = 
{}; + /** @type { number | null | undefined } */ + selectedNodeIndex; + /** @type { keyof ManageGroupDialog["tabs"] } */ + selectedTab = "Inputs"; + /** @type { string | undefined } */ + selectedGroup; + + /** @type { Record>> } */ + modifications = {}; + + get selectedNodeInnerIndex() { + return +this.nodeItems[this.selectedNodeIndex].dataset.nodeindex; + } + + constructor(app) { + super(); + this.app = app; + this.element = $el("dialog.comfy-group-manage", { + parent: document.body, + }); + } + + changeTab(tab) { + this.tabs[this.selectedTab].tab.classList.remove("active"); + this.tabs[this.selectedTab].page.classList.remove("active"); + this.tabs[tab].tab.classList.add("active"); + this.tabs[tab].page.classList.add("active"); + this.selectedTab = tab; + } + + changeNode(index, force) { + if (!force && this.selectedNodeIndex === index) return; + + if (this.selectedNodeIndex != null) { + this.nodeItems[this.selectedNodeIndex].classList.remove("selected"); + } + this.nodeItems[index].classList.add("selected"); + this.selectedNodeIndex = index; + + if (!this.buildInputsPage() && this.selectedTab === "Inputs") { + this.changeTab("Widgets"); + } + if (!this.buildWidgetsPage() && this.selectedTab === "Widgets") { + this.changeTab("Outputs"); + } + if (!this.buildOutputsPage() && this.selectedTab === "Outputs") { + this.changeTab("Inputs"); + } + + this.changeTab(this.selectedTab); + } + + getGroupData() { + this.groupNodeType = LiteGraph.registered_node_types["workflow/" + this.selectedGroup]; + this.groupNodeDef = this.groupNodeType.nodeData; + this.groupData = GroupNodeHandler.getGroupData(this.groupNodeType); + } + + changeGroup(group, reset = true) { + this.selectedGroup = group; + this.getGroupData(); + + const nodes = this.groupData.nodeData.nodes; + this.nodeItems = nodes.map((n, i) => + $el( + "li.draggable-item", + { + dataset: { + nodeindex: n.index + "", + }, + onclick: () => { + this.changeNode(i); + }, + }, + [ + $el("span.drag-handle"), + $el( + "div", + { + textContent: n.title ?? n.type, + }, + n.title + ? $el("span", { + textContent: n.type, + }) + : [] + ), + ] + ) + ); + + this.innerNodesList.replaceChildren(...this.nodeItems); + + if (reset) { + this.selectedNodeIndex = null; + this.changeNode(0); + } else { + const items = this.draggable.getAllItems(); + let index = items.findIndex(item => item.classList.contains("selected")); + if(index === -1) index = this.selectedNodeIndex; + this.changeNode(index, true); + } + + const ordered = [...nodes]; + this.draggable?.dispose(); + this.draggable = new DraggableList(this.innerNodesList, "li"); + this.draggable.addEventListener("dragend", ({ detail: { oldPosition, newPosition } }) => { + if (oldPosition === newPosition) return; + ordered.splice(newPosition, 0, ordered.splice(oldPosition, 1)[0]); + for (let i = 0; i < ordered.length; i++) { + this.storeModification({ nodeIndex: ordered[i].index, section: ORDER, prop: "order", value: i }); + } + }); + } + + storeModification({ nodeIndex, section, prop, value }) { + const groupMod = (this.modifications[this.selectedGroup] ??= {}); + const nodesMod = (groupMod.nodes ??= {}); + const nodeMod = (nodesMod[nodeIndex ?? 
this.selectedNodeInnerIndex] ??= {}); + const typeMod = (nodeMod[section] ??= {}); + if (typeof value === "object") { + const objMod = (typeMod[prop] ??= {}); + Object.assign(objMod, value); + } else { + typeMod[prop] = value; + } + } + + getEditElement(section, prop, value, placeholder, checked, checkable = true) { + if (value === placeholder) value = ""; + + const mods = this.modifications[this.selectedGroup]?.nodes?.[this.selectedNodeInnerIndex]?.[section]?.[prop]; + if (mods) { + if (mods.name != null) { + value = mods.name; + } + if (mods.visible != null) { + checked = mods.visible; + } + } + + return $el("div", [ + $el("input", { + value, + placeholder, + type: "text", + onchange: (e) => { + this.storeModification({ section, prop, value: { name: e.target.value } }); + }, + }), + $el("label", { textContent: "Visible" }, [ + $el("input", { + type: "checkbox", + checked, + disabled: !checkable, + onchange: (e) => { + this.storeModification({ section, prop, value: { visible: !!e.target.checked } }); + }, + }), + ]), + ]); + } + + buildWidgetsPage() { + const widgets = this.groupData.oldToNewWidgetMap[this.selectedNodeInnerIndex]; + const items = Object.keys(widgets ?? {}); + const type = app.graph.extra.groupNodes[this.selectedGroup]; + const config = type.config?.[this.selectedNodeInnerIndex]?.input; + this.widgetsPage.replaceChildren( + ...items.map((oldName) => { + return this.getEditElement("input", oldName, widgets[oldName], oldName, config?.[oldName]?.visible !== false); + }) + ); + return !!items.length; + } + + buildInputsPage() { + const inputs = this.groupData.nodeInputs[this.selectedNodeInnerIndex]; + const items = Object.keys(inputs ?? {}); + const type = app.graph.extra.groupNodes[this.selectedGroup]; + const config = type.config?.[this.selectedNodeInnerIndex]?.input; + this.inputsPage.replaceChildren( + ...items + .map((oldName) => { + let value = inputs[oldName]; + if (!value) { + return; + } + + return this.getEditElement("input", oldName, value, oldName, config?.[oldName]?.visible !== false); + }) + .filter(Boolean) + ); + return !!items.length; + } + + buildOutputsPage() { + const nodes = this.groupData.nodeData.nodes; + const innerNodeDef = this.groupData.getNodeDef(nodes[this.selectedNodeInnerIndex]); + const outputs = innerNodeDef?.output ?? []; + const groupOutputs = this.groupData.oldToNewOutputMap[this.selectedNodeInnerIndex]; + + const type = app.graph.extra.groupNodes[this.selectedGroup]; + const config = type.config?.[this.selectedNodeInnerIndex]?.output; + const node = this.groupData.nodeData.nodes[this.selectedNodeInnerIndex]; + const checkable = node.type !== "PrimitiveNode"; + this.outputsPage.replaceChildren( + ...outputs + .map((type, slot) => { + const groupOutputIndex = groupOutputs?.[slot]; + const oldName = innerNodeDef.output_name?.[slot] ?? type; + let value = config?.[slot]?.name; + const visible = config?.[slot]?.visible || groupOutputIndex != null; + if (!value || value === oldName) { + value = ""; + } + return this.getEditElement("output", slot, value, oldName, visible, checkable); + }) + .filter(Boolean) + ); + return !!outputs.length; + } + + show(type) { + const groupNodes = Object.keys(app.graph.extra?.groupNodes ?? 
{}).sort((a, b) => a.localeCompare(b)); + + this.innerNodesList = $el("ul.comfy-group-manage-list-items"); + this.widgetsPage = $el("section.comfy-group-manage-node-page"); + this.inputsPage = $el("section.comfy-group-manage-node-page"); + this.outputsPage = $el("section.comfy-group-manage-node-page"); + const pages = $el("div", [this.widgetsPage, this.inputsPage, this.outputsPage]); + + this.tabs = [ + ["Inputs", this.inputsPage], + ["Widgets", this.widgetsPage], + ["Outputs", this.outputsPage], + ].reduce((p, [name, page]) => { + p[name] = { + tab: $el("a", { + onclick: () => { + this.changeTab(name); + }, + textContent: name, + }), + page, + }; + return p; + }, {}); + + const outer = $el("div.comfy-group-manage-outer", [ + $el("header", [ + $el("h2", "Group Nodes"), + $el( + "select", + { + onchange: (e) => { + this.changeGroup(e.target.value); + }, + }, + groupNodes.map((g) => + $el("option", { + textContent: g, + selected: "workflow/" + g === type, + value: g, + }) + ) + ), + ]), + $el("main", [ + $el("section.comfy-group-manage-list", this.innerNodesList), + $el("section.comfy-group-manage-node", [ + $el( + "header", + Object.values(this.tabs).map((t) => t.tab) + ), + pages, + ]), + ]), + $el("footer", [ + $el( + "button.comfy-btn", + { + onclick: (e) => { + const node = app.graph._nodes.find((n) => n.type === "workflow/" + this.selectedGroup); + if (node) { + alert("This group node is in use in the current workflow, please first remove these."); + return; + } + if (confirm(`Are you sure you want to remove the node: "${this.selectedGroup}"`)) { + delete app.graph.extra.groupNodes[this.selectedGroup]; + LiteGraph.unregisterNodeType("workflow/" + this.selectedGroup); + } + this.show(); + }, + }, + "Delete Group Node" + ), + $el( + "button.comfy-btn", + { + onclick: async () => { + let nodesByType; + let recreateNodes = []; + const types = {}; + for (const g in this.modifications) { + const type = app.graph.extra.groupNodes[g]; + let config = (type.config ??= {}); + + let nodeMods = this.modifications[g]?.nodes; + if (nodeMods) { + const keys = Object.keys(nodeMods); + if (nodeMods[keys[0]][ORDER]) { + // If any node is reordered, they will all need sequencing + const orderedNodes = []; + const orderedMods = {}; + const orderedConfig = {}; + + for (const n of keys) { + const order = nodeMods[n][ORDER].order; + orderedNodes[order] = type.nodes[+n]; + orderedMods[order] = nodeMods[n]; + orderedNodes[order].index = order; + } + + // Rewrite links + for (const l of type.links) { + if (l[0] != null) l[0] = type.nodes[l[0]].index; + if (l[2] != null) l[2] = type.nodes[l[2]].index; + } + + // Rewrite externals + if (type.external) { + for (const ext of type.external) { + ext[0] = type.nodes[ext[0]]; + } + } + + // Rewrite modifications + for (const id of keys) { + if (config[id]) { + orderedConfig[type.nodes[id].index] = config[id]; + } + delete config[id]; + } + + type.nodes = orderedNodes; + nodeMods = orderedMods; + type.config = config = orderedConfig; + } + + merge(config, nodeMods); + } + + types[g] = type; + + if (!nodesByType) { + nodesByType = app.graph._nodes.reduce((p, n) => { + p[n.type] ??= []; + p[n.type].push(n); + return p; + }, {}); + } + + const nodes = nodesByType["workflow/" + g]; + if (nodes) recreateNodes.push(...nodes); + } + + await GroupNodeConfig.registerFromWorkflow(types, {}); + + for (const node of recreateNodes) { + node.recreate(); + } + + this.modifications = {}; + this.app.graph.setDirtyCanvas(true, true); + this.changeGroup(this.selectedGroup, false); + }, + 
}, + "Save" + ), + $el("button.comfy-btn", { onclick: () => this.element.close() }, "Close"), + ]), + ]); + + this.element.replaceChildren(outer); + this.changeGroup(type ? groupNodes.find((g) => "workflow/" + g === type) : groupNodes[0]); + this.element.showModal(); + + this.element.addEventListener("close", () => { + this.draggable?.dispose(); + }); + } +} \ No newline at end of file diff --git a/web/jsconfig.json b/web/jsconfig.json index 57403d8cf2b..b65fa2746da 100644 --- a/web/jsconfig.json +++ b/web/jsconfig.json @@ -3,7 +3,8 @@ "baseUrl": ".", "paths": { "/*": ["./*"] - } + }, + "lib": ["DOM", "ES2022"] }, "include": ["."] } diff --git a/web/scripts/domWidget.js b/web/scripts/domWidget.js index eb0742d3882..d5eeebdbd39 100644 --- a/web/scripts/domWidget.js +++ b/web/scripts/domWidget.js @@ -239,7 +239,8 @@ LGraphNode.prototype.addDOMWidget = function (name, type, element, options) { node.flags?.collapsed || (!!options.hideOnZoom && app.canvas.ds.scale < 0.5) || widget.computedHeight <= 0 || - widget.type === "converted-widget"; + widget.type === "converted-widget"|| + widget.type === "hidden"; element.hidden = hidden; element.style.display = hidden ? "none" : null; if (hidden) { diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 6887f70288a..d07d69dc8f0 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -4,6 +4,19 @@ import { ComfySettingsDialog } from "./ui/settings.js"; export const ComfyDialog = _ComfyDialog; +/** + * + * @param { string } tag HTML Element Tag and optional classes e.g. div.class1.class2 + * @param { string | Element | Element[] | { + * parent?: Element, + * $?: (el: Element) => void, + * dataset?: DOMStringMap, + * style?: CSSStyleDeclaration, + * for?: string + * } | undefined } propsOrChildren + * @param { Element[] | undefined } [children] + * @returns + */ export function $el(tag, propsOrChildren, children) { const split = tag.split("."); const element = document.createElement(split.shift()); @@ -12,6 +25,11 @@ export function $el(tag, propsOrChildren, children) { } if (propsOrChildren) { + if (typeof propsOrChildren === "string") { + propsOrChildren = { textContent: propsOrChildren }; + } else if (propsOrChildren instanceof Element) { + propsOrChildren = [propsOrChildren]; + } if (Array.isArray(propsOrChildren)) { element.append(...propsOrChildren); } else { @@ -35,7 +53,7 @@ export function $el(tag, propsOrChildren, children) { Object.assign(element, propsOrChildren); if (children) { - element.append(...children); + element.append(...(children instanceof Array ? children : [children])); } if (parent) { diff --git a/web/scripts/ui/draggableList.js b/web/scripts/ui/draggableList.js new file mode 100644 index 00000000000..d535948869f --- /dev/null +++ b/web/scripts/ui/draggableList.js @@ -0,0 +1,287 @@ +// @ts-check +/* + Original implementation: + https://github.com/TahaSh/drag-to-reorder + MIT License + + Copyright (c) 2023 Taha Shashtari + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. +*/ + +import { $el } from "../ui.js"; + +$el("style", { + parent: document.head, + textContent: ` + .draggable-item { + position: relative; + will-change: transform; + user-select: none; + } + .draggable-item.is-idle { + transition: 0.25s ease transform; + } + .draggable-item.is-draggable { + z-index: 10; + } + ` +}); + +export class DraggableList extends EventTarget { + listContainer; + draggableItem; + pointerStartX; + pointerStartY; + scrollYMax; + itemsGap = 0; + items = []; + itemSelector; + handleClass = "drag-handle"; + off = []; + offDrag = []; + + constructor(element, itemSelector) { + super(); + this.listContainer = element; + this.itemSelector = itemSelector; + + if (!this.listContainer) return; + + this.off.push(this.on(this.listContainer, "mousedown", this.dragStart)); + this.off.push(this.on(this.listContainer, "touchstart", this.dragStart)); + this.off.push(this.on(document, "mouseup", this.dragEnd)); + this.off.push(this.on(document, "touchend", this.dragEnd)); + } + + getAllItems() { + if (!this.items?.length) { + this.items = Array.from(this.listContainer.querySelectorAll(this.itemSelector)); + this.items.forEach((element) => { + element.classList.add("is-idle"); + }); + } + return this.items; + } + + getIdleItems() { + return this.getAllItems().filter((item) => item.classList.contains("is-idle")); + } + + isItemAbove(item) { + return item.hasAttribute("data-is-above"); + } + + isItemToggled(item) { + return item.hasAttribute("data-is-toggled"); + } + + on(source, event, listener, options) { + listener = listener.bind(this); + source.addEventListener(event, listener, options); + return () => source.removeEventListener(event, listener); + } + + dragStart(e) { + if (e.target.classList.contains(this.handleClass)) { + this.draggableItem = e.target.closest(this.itemSelector); + } + + if (!this.draggableItem) return; + + this.pointerStartX = e.clientX || e.touches[0].clientX; + this.pointerStartY = e.clientY || e.touches[0].clientY; + this.scrollYMax = this.listContainer.scrollHeight - this.listContainer.clientHeight; + + this.setItemsGap(); + this.initDraggableItem(); + this.initItemsState(); + + this.offDrag.push(this.on(document, "mousemove", this.drag)); + this.offDrag.push(this.on(document, "touchmove", this.drag, { passive: false })); + + this.dispatchEvent( + new CustomEvent("dragstart", { + detail: { element: this.draggableItem, position: this.getAllItems().indexOf(this.draggableItem) }, + }) + ); + } + + setItemsGap() { + if (this.getIdleItems().length <= 1) { + this.itemsGap = 0; + return; + } + + const item1 = this.getIdleItems()[0]; + const item2 = this.getIdleItems()[1]; + + const item1Rect = item1.getBoundingClientRect(); + const item2Rect = item2.getBoundingClientRect(); + + this.itemsGap = Math.abs(item1Rect.bottom - item2Rect.top); + } + + initItemsState() { + this.getIdleItems().forEach((item, i) => { + if (this.getAllItems().indexOf(this.draggableItem) > i) { + item.dataset.isAbove = ""; + } + }); + } + + initDraggableItem() { + this.draggableItem.classList.remove("is-idle"); + 
this.draggableItem.classList.add("is-draggable"); + } + + drag(e) { + if (!this.draggableItem) return; + + e.preventDefault(); + + const clientX = e.clientX || e.touches[0].clientX; + const clientY = e.clientY || e.touches[0].clientY; + + const listRect = this.listContainer.getBoundingClientRect(); + + if (clientY > listRect.bottom) { + if (this.listContainer.scrollTop < this.scrollYMax) { + this.listContainer.scrollBy(0, 10); + this.pointerStartY -= 10; + } + } else if (clientY < listRect.top && this.listContainer.scrollTop > 0) { + this.pointerStartY += 10; + this.listContainer.scrollBy(0, -10); + } + + const pointerOffsetX = clientX - this.pointerStartX; + const pointerOffsetY = clientY - this.pointerStartY; + + this.updateIdleItemsStateAndPosition(); + this.draggableItem.style.transform = `translate(${pointerOffsetX}px, ${pointerOffsetY}px)`; + } + + updateIdleItemsStateAndPosition() { + const draggableItemRect = this.draggableItem.getBoundingClientRect(); + const draggableItemY = draggableItemRect.top + draggableItemRect.height / 2; + + // Update state + this.getIdleItems().forEach((item) => { + const itemRect = item.getBoundingClientRect(); + const itemY = itemRect.top + itemRect.height / 2; + if (this.isItemAbove(item)) { + if (draggableItemY <= itemY) { + item.dataset.isToggled = ""; + } else { + delete item.dataset.isToggled; + } + } else { + if (draggableItemY >= itemY) { + item.dataset.isToggled = ""; + } else { + delete item.dataset.isToggled; + } + } + }); + + // Update position + this.getIdleItems().forEach((item) => { + if (this.isItemToggled(item)) { + const direction = this.isItemAbove(item) ? 1 : -1; + item.style.transform = `translateY(${direction * (draggableItemRect.height + this.itemsGap)}px)`; + } else { + item.style.transform = ""; + } + }); + } + + dragEnd() { + if (!this.draggableItem) return; + + this.applyNewItemsOrder(); + this.cleanup(); + } + + applyNewItemsOrder() { + const reorderedItems = []; + + let oldPosition = -1; + this.getAllItems().forEach((item, index) => { + if (item === this.draggableItem) { + oldPosition = index; + return; + } + if (!this.isItemToggled(item)) { + reorderedItems[index] = item; + return; + } + const newIndex = this.isItemAbove(item) ? 
index + 1 : index - 1; + reorderedItems[newIndex] = item; + }); + + for (let index = 0; index < this.getAllItems().length; index++) { + const item = reorderedItems[index]; + if (typeof item === "undefined") { + reorderedItems[index] = this.draggableItem; + } + } + + reorderedItems.forEach((item) => { + this.listContainer.appendChild(item); + }); + + this.items = reorderedItems; + + this.dispatchEvent( + new CustomEvent("dragend", { + detail: { element: this.draggableItem, oldPosition, newPosition: reorderedItems.indexOf(this.draggableItem) }, + }) + ); + } + + cleanup() { + this.itemsGap = 0; + this.items = []; + this.unsetDraggableItem(); + this.unsetItemState(); + + this.offDrag.forEach((f) => f()); + this.offDrag = []; + } + + unsetDraggableItem() { + this.draggableItem.style = null; + this.draggableItem.classList.remove("is-draggable"); + this.draggableItem.classList.add("is-idle"); + this.draggableItem = null; + } + + unsetItemState() { + this.getIdleItems().forEach((item, i) => { + delete item.dataset.isAbove; + delete item.dataset.isToggled; + item.style.transform = ""; + }); + } + + dispose() { + this.off.forEach((f) => f()); + } +} diff --git a/web/style.css b/web/style.css index c57fb7f735d..5c1133495d7 100644 --- a/web/style.css +++ b/web/style.css @@ -145,6 +145,12 @@ body { } .comfy-menu span.drag-handle { + position: absolute; + top: 0; + left: 0; +} + +span.drag-handle { width: 10px; height: 20px; display: inline-block; @@ -160,12 +166,9 @@ body { letter-spacing: 2px; color: var(--drag-text); text-shadow: 1px 0 1px black; - position: absolute; - top: 0; - left: 0; } -.comfy-menu span.drag-handle::after { +span.drag-handle::after { content: '.. .. ..'; } From 9bddc9d94b478202c6ea3e8ff8007efefcc047c1 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sat, 13 Jan 2024 21:02:51 +0000 Subject: [PATCH 397/420] Fix crash on group render --- web/extensions/core/groupNode.js | 1 + 1 file changed, 1 insertion(+) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 4cf1f7621b9..80b836b7192 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -843,6 +843,7 @@ export class GroupNodeHandler { const r = onDrawForeground?.apply?.(this, arguments); if (+app.runningNodeId === this.id && this.runningInternalNodeId !== null) { const n = groupData.nodes[this.runningInternalNodeId]; + if(!n) return; const message = `Running ${n.title || n.type} (${this.runningInternalNodeId}/${groupData.nodes.length})`; ctx.save(); ctx.font = "12px sans-serif"; From 270daa02a8f94ad8600a1c3bf562c981fd02f416 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Sun, 14 Jan 2024 19:53:52 +0000 Subject: [PATCH 398/420] Adds copy image option if browser feature available (#2544) * Adds copy image option if browser feature available * refactor --- web/scripts/app.js | 118 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 96 insertions(+), 22 deletions(-) diff --git a/web/scripts/app.js b/web/scripts/app.js index d131045d7ab..e6c01061741 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -269,6 +269,71 @@ export class ComfyApp { * @param {*} node The node to add the menu handler */ #addNodeContextMenuHandler(node) { + function getCopyImageOption(img) { + if (typeof window.ClipboardItem === "undefined") return []; + return [ + { + content: "Copy Image", + callback: async () => { + const url = new URL(img.src); + 
url.searchParams.delete("preview"); + + const writeImage = async (blob) => { + await navigator.clipboard.write([ + new ClipboardItem({ + [blob.type]: blob, + }), + ]); + }; + + try { + const data = await fetch(url); + const blob = await data.blob(); + try { + await writeImage(blob); + } catch (error) { + // Chrome seems to only support PNG on write, convert and try again + if (blob.type !== "image/png") { + const canvas = $el("canvas", { + width: img.naturalWidth, + height: img.naturalHeight, + }); + const ctx = canvas.getContext("2d"); + let image; + if (typeof window.createImageBitmap === "undefined") { + image = new Image(); + const p = new Promise((resolve, reject) => { + image.onload = resolve; + image.onerror = reject; + }).finally(() => { + URL.revokeObjectURL(image.src); + }); + image.src = URL.createObjectURL(blob); + await p; + } else { + image = await createImageBitmap(blob); + } + try { + ctx.drawImage(image, 0, 0); + canvas.toBlob(writeImage, "image/png"); + } finally { + if (typeof image.close === "function") { + image.close(); + } + } + + return; + } + throw error; + } + } catch (error) { + alert("Error copying image: " + (error.message ?? error)); + } + }, + }, + ]; + } + node.prototype.getExtraMenuOptions = function (_, options) { if (this.imgs) { // If this node has images then we add an open in new tab item @@ -286,16 +351,17 @@ export class ComfyApp { content: "Open Image", callback: () => { let url = new URL(img.src); - url.searchParams.delete('preview'); - window.open(url, "_blank") + url.searchParams.delete("preview"); + window.open(url, "_blank"); }, }, + ...getCopyImageOption(img), { content: "Save Image", callback: () => { const a = document.createElement("a"); let url = new URL(img.src); - url.searchParams.delete('preview'); + url.searchParams.delete("preview"); a.href = url; a.setAttribute("download", new URLSearchParams(url.search).get("filename")); document.body.append(a); @@ -308,33 +374,41 @@ export class ComfyApp { } options.push({ - content: "Bypass", - callback: (obj) => { if (this.mode === 4) this.mode = 0; else this.mode = 4; this.graph.change(); } - }); + content: "Bypass", + callback: (obj) => { + if (this.mode === 4) this.mode = 0; + else this.mode = 4; + this.graph.change(); + }, + }); // prevent conflict of clipspace content - if(!ComfyApp.clipspace_return_node) { + if (!ComfyApp.clipspace_return_node) { options.push({ - content: "Copy (Clipspace)", - callback: (obj) => { ComfyApp.copyToClipspace(this); } - }); + content: "Copy (Clipspace)", + callback: (obj) => { + ComfyApp.copyToClipspace(this); + }, + }); - if(ComfyApp.clipspace != null) { + if (ComfyApp.clipspace != null) { options.push({ - content: "Paste (Clipspace)", - callback: () => { ComfyApp.pasteFromClipspace(this); } - }); + content: "Paste (Clipspace)", + callback: () => { + ComfyApp.pasteFromClipspace(this); + }, + }); } - if(ComfyApp.isImageNode(this)) { + if (ComfyApp.isImageNode(this)) { options.push({ - content: "Open in MaskEditor", - callback: (obj) => { - ComfyApp.copyToClipspace(this); - ComfyApp.clipspace_return_node = this; - ComfyApp.open_maskeditor(); - } - }); + content: "Open in MaskEditor", + callback: (obj) => { + ComfyApp.copyToClipspace(this); + ComfyApp.clipspace_return_node = this; + ComfyApp.open_maskeditor(); + }, + }); } } }; From 2395ae740a4dd45534e8ba21031deaefdcade1b4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 14 Jan 2024 17:25:21 -0500 Subject: [PATCH 399/420] Make unclip more deterministic. 
Pass a seed argument; note that this might make old unclip images different.
---
 comfy/ldm/modules/encoders/noise_aug_modules.py | 4 ++--
 comfy/model_base.py | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/comfy/ldm/modules/encoders/noise_aug_modules.py b/comfy/ldm/modules/encoders/noise_aug_modules.py
index 66767b5874a..a5d86603016 100644
--- a/comfy/ldm/modules/encoders/noise_aug_modules.py
+++ b/comfy/ldm/modules/encoders/noise_aug_modules.py
@@ -23,13 +23,13 @@ def unscale(self, x):
 x = (x * self.data_std.to(x.device)) + self.data_mean.to(x.device)
 return x
- def forward(self, x, noise_level=None):
+ def forward(self, x, noise_level=None, seed=None):
 if noise_level is None:
 noise_level = torch.randint(0, self.max_noise_level, (x.shape[0],), device=x.device).long()
 else:
 assert isinstance(noise_level, torch.Tensor)
 x = self.scale(x)
- z = self.q_sample(x, noise_level)
+ z = self.q_sample(x, noise_level, seed=seed)
 z = self.unscale(z)
 noise_level = self.time_embed(noise_level)
 return z, noise_level
diff --git a/comfy/model_base.py b/comfy/model_base.py
index 52c87ede684..b2ea6590fff 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -210,7 +210,7 @@ def memory_required(self, input_shape):
 return (((area * 0.6) / 0.9) + 1024) * (1024 * 1024)
-def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0):
+def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge=0.0, seed=None):
 adm_inputs = []
 weights = []
 noise_aug = []
@@ -219,7 +219,7 @@ def unclip_adm(unclip_conditioning, device, noise_augmentor, noise_augment_merge
 weight = unclip_cond["strength"]
 noise_augment = unclip_cond["noise_augmentation"]
 noise_level = round((noise_augmentor.max_noise_level - 1) * noise_augment)
- c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device))
+ c_adm, noise_level_emb = noise_augmentor(adm_cond.to(device), noise_level=torch.tensor([noise_level], device=device), seed=seed)
 adm_out = torch.cat((c_adm, noise_level_emb), 1) * weight
 weights.append(weight)
 noise_aug.append(noise_augment)
@@ -245,11 +245,11 @@ def encode_adm(self, **kwargs):
 if unclip_conditioning is None:
 return torch.zeros((1, self.adm_channels))
 else:
- return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05))
+ return unclip_adm(unclip_conditioning, device, self.noise_augmentor, kwargs.get("unclip_noise_augment_merge", 0.05), kwargs.get("seed", 0) - 10)

 def sdxl_pooled(args, noise_augmentor):
 if "unclip_conditioning" in args:
- return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor)[:,:1280]
+ return unclip_adm(args.get("unclip_conditioning", None), args["device"], noise_augmentor, seed=args.get("seed", 0) - 10)[:,:1280]
 else:
 return args["pooled_output"]

From 1dab412c79eccdd055265475a0bd31ba97e2cf9c Mon Sep 17 00:00:00 2001
From: TFWol <9045213+TFWol@users.noreply.github.com>
Date: Sun, 14 Jan 2024 15:06:33 -0800
Subject: [PATCH 400/420] Add error handling to initial fix to keep cache intact
---
 folder_paths.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/folder_paths.py b/folder_paths.py
index 4abed783be0..ef9b8ccfaa5 100644
--- a/folder_paths.py
+++ b/folder_paths.py
@@ -139,6 +139,13 @@ def recursive_search(directory, excluded_dir_names=None):
 result = []
 dirs = {}
+
+ # Attempt to add the initial directory to dirs with error handling
+ try:
+ dirs[directory] =
os.path.getmtime(directory) + except FileNotFoundError: + print(f"Warning: Unable to access {directory}. Skipping this path.") + for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True): subdirs[:] = [d for d in subdirs if d not in excluded_dir_names] for file_name in filenames: From f9e55d8463da692954d84f51ca354161396fe1b8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 15 Jan 2024 03:10:22 -0500 Subject: [PATCH 401/420] Only auto enable bf16 VAE on nvidia GPUs that actually support it. --- comfy/model_management.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index fefd3c8c99d..e12146d11b8 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -175,7 +175,7 @@ def is_nvidia(): if int(torch_version[0]) >= 2: if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False: ENABLE_PYTORCH_ATTENTION = True - if torch.cuda.is_bf16_supported(): + if torch.cuda.is_bf16_supported() and torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8: VAE_DTYPE = torch.bfloat16 if is_intel_xpu(): if args.use_split_cross_attention == False and args.use_quad_cross_attention == False: From 23687da9a94f862b0e372db3c61412c82b95f398 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Mon, 15 Jan 2024 17:45:48 +0000 Subject: [PATCH 402/420] Fix logging not checking onChange --- web/scripts/logging.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/web/scripts/logging.js b/web/scripts/logging.js index c73462e1ea3..875dd970bc8 100644 --- a/web/scripts/logging.js +++ b/web/scripts/logging.js @@ -269,6 +269,9 @@ export class ComfyLogging { id: settingId, name: settingId, defaultValue: true, + onChange: (value) => { + this.enabled = value; + }, type: (name, setter, value) => { return $el("tr", [ $el("td", [ @@ -283,7 +286,7 @@ export class ComfyLogging { type: "checkbox", checked: value, onchange: (event) => { - setter((this.enabled = event.target.checked)); + setter(event.target.checked); }, }), $el("button", { From 93bbe3f4c0cac7165a523185c7f373893f3292cb Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 16 Jan 2024 13:27:40 +0000 Subject: [PATCH 403/420] Auto queue on change (#2542) * Add toggle to enable auto queue when graph is changed * type fix * better * better alignment * Change undoredo to not ignore inputs when autoqueue in change mode --- web/extensions/core/undoRedo.js | 31 +++++++++++++---- web/scripts/app.js | 1 + web/scripts/ui.js | 42 ++++++++++++++++++++--- web/scripts/ui/toggleSwitch.js | 60 +++++++++++++++++++++++++++++++++ web/style.css | 38 +++++++++++++++++++++ 5 files changed, 161 insertions(+), 11 deletions(-) create mode 100644 web/scripts/ui/toggleSwitch.js diff --git a/web/extensions/core/undoRedo.js b/web/extensions/core/undoRedo.js index ff976c74c3e..900eed2a7cd 100644 --- a/web/extensions/core/undoRedo.js +++ b/web/extensions/core/undoRedo.js @@ -1,4 +1,5 @@ import { app } from "../../scripts/app.js"; +import { api } from "../../scripts/api.js" const MAX_HISTORY = 50; @@ -15,6 +16,7 @@ function checkState() { } activeState = clone(currentState); redo.length = 0; + api.dispatchEvent(new CustomEvent("graphChanged", { detail: activeState })); } } @@ -92,7 +94,7 @@ const undoRedo = async (e) => { }; const bindInput = (activeEl) => { - if (activeEl?.tagName 
!== "CANVAS" && activeEl?.tagName !== "BODY") { + if (activeEl && activeEl.tagName !== "CANVAS" && activeEl.tagName !== "BODY") { for (const evt of ["change", "input", "blur"]) { if (`on${evt}` in activeEl) { const listener = () => { @@ -111,12 +113,16 @@ window.addEventListener( "keydown", (e) => { requestAnimationFrame(async () => { - const activeEl = document.activeElement; - if (activeEl?.tagName === "INPUT" || activeEl?.type === "textarea") { - // Ignore events on inputs, they have their native history - return; + let activeEl; + // If we are auto queue in change mode then we do want to trigger on inputs + if (!app.ui.autoQueueEnabled || app.ui.autoQueueMode === "instant") { + activeEl = document.activeElement; + if (activeEl?.tagName === "INPUT" || activeEl?.type === "textarea") { + // Ignore events on inputs, they have their native history + return; + } } - + keyIgnored = e.key === "Control" || e.key === "Shift" || e.key === "Alt" || e.key === "Meta"; if (keyIgnored) return; @@ -143,6 +149,11 @@ window.addEventListener("mouseup", () => { checkState(); }); +// Handle prompt queue event for dynamic widget changes +api.addEventListener("promptQueued", () => { + checkState(); +}); + // Handle litegraph clicks const processMouseUp = LGraphCanvas.prototype.processMouseUp; LGraphCanvas.prototype.processMouseUp = function (e) { @@ -156,3 +167,11 @@ LGraphCanvas.prototype.processMouseDown = function (e) { checkState(); return v; }; + +// Handle litegraph context menu for COMBO widgets +const close = LiteGraph.ContextMenu.prototype.close; +LiteGraph.ContextMenu.prototype.close = function(e) { + const v = close.apply(this, arguments); + checkState(); + return v; +} \ No newline at end of file diff --git a/web/scripts/app.js b/web/scripts/app.js index e6c01061741..a2cd1e31627 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -2068,6 +2068,7 @@ export class ComfyApp { } finally { this.#processingQueue = false; } + api.dispatchEvent(new CustomEvent("promptQueued", { detail: { number, batchCount } })); } /** diff --git a/web/scripts/ui.js b/web/scripts/ui.js index d07d69dc8f0..4437345a3ff 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -1,5 +1,6 @@ import { api } from "./api.js"; import { ComfyDialog as _ComfyDialog } from "./ui/dialog.js"; +import { toggleSwitch } from "./ui/toggleSwitch.js"; import { ComfySettingsDialog } from "./ui/settings.js"; export const ComfyDialog = _ComfyDialog; @@ -368,6 +369,31 @@ export class ComfyUI { }, }); + const autoQueueModeEl = toggleSwitch( + "autoQueueMode", + [ + { text: "instant", tooltip: "A new prompt will be queued as soon as the queue reaches 0" }, + { text: "change", tooltip: "A new prompt will be queued when the queue is at 0 and the graph is/has changed" }, + ], + { + onChange: (value) => { + this.autoQueueMode = value.item.value; + }, + } + ); + autoQueueModeEl.style.display = "none"; + + api.addEventListener("graphChanged", () => { + if (this.autoQueueMode === "change") { + if (this.lastQueueSize === 0) { + this.graphHasChanged = false; + app.queuePrompt(0, this.batchCount); + } else { + this.graphHasChanged = true; + } + } + }); + this.menuContainer = $el("div.comfy-menu", {parent: document.body}, [ $el("div.drag-handle", { style: { @@ -394,6 +420,7 @@ export class ComfyUI { document.getElementById("extraOptions").style.display = i.srcElement.checked ? "block" : "none"; this.batchCount = i.srcElement.checked ? 
document.getElementById("batchCountInputRange").value : 1;
 document.getElementById("autoQueueCheckbox").checked = false;
+ this.autoQueueEnabled = false;
 },
 }),
 ]),
@@ -425,20 +452,22 @@
 },
 }),
 ]),
-
 $el("div",[
 $el("label",{
 for:"autoQueueCheckbox",
 innerHTML: "Auto Queue"
- // textContent: "Auto Queue"
 }),
 $el("input", {
 id: "autoQueueCheckbox",
 type: "checkbox",
 checked: false,
 title: "Automatically queue prompt when the queue size hits 0",
-
+ onchange: (e) => {
+ this.autoQueueEnabled = e.target.checked;
+ autoQueueModeEl.style.display = this.autoQueueEnabled ? "" : "none";
+ }
 }),
+ autoQueueModeEl
 ])
 ]),
 $el("div.comfy-menu-btns", [
@@ -572,10 +601,13 @@
 if (
 this.lastQueueSize != 0 &&
 status.exec_info.queue_remaining == 0 &&
- document.getElementById("autoQueueCheckbox").checked &&
- ! app.lastExecutionError
+ this.autoQueueEnabled &&
+ (this.autoQueueMode === "instant" || this.graphHasChanged) &&
+ !app.lastExecutionError
 ) {
 app.queuePrompt(0, this.batchCount);
+ status.exec_info.queue_remaining += this.batchCount;
+ this.graphHasChanged = false;
 }
 this.lastQueueSize = status.exec_info.queue_remaining;
 }
diff --git a/web/scripts/ui/toggleSwitch.js b/web/scripts/ui/toggleSwitch.js
new file mode 100644
index 00000000000..59597ef90e5
--- /dev/null
+++ b/web/scripts/ui/toggleSwitch.js
@@ -0,0 +1,60 @@
+import { $el } from "../ui.js";
+
+/**
+ * @typedef { { text: string, value?: string, tooltip?: string } } ToggleSwitchItem
+ */
+/**
+ * Creates a toggle switch element
+ * @param { string } name
+ * @param { Array<string | ToggleSwitchItem> } items
+ * @param { Object } [opts]
+ * @param { (e: { item: ToggleSwitchItem, prev?: ToggleSwitchItem }) => void } [opts.onChange]
+ */
+export function toggleSwitch(name, items, { onChange } = {}) {
+	let selectedIndex;
+	let elements;
+
+	function updateSelected(index) {
+		if (selectedIndex != null) {
+			elements[selectedIndex].classList.remove("comfy-toggle-selected");
+		}
+		onChange?.({ item: items[index], prev: selectedIndex == null ? undefined : items[selectedIndex] });
+		selectedIndex = index;
+		elements[selectedIndex].classList.add("comfy-toggle-selected");
+	}
+
+	elements = items.map((item, i) => {
+		if (typeof item === "string") item = { text: item };
+		if (!item.value) item.value = item.text;
+
+		const toggle = $el(
+			"label",
+			{
+				textContent: item.text,
+				title: item.tooltip ?? "",
+			},
+			$el("input", {
+				name,
+				type: "radio",
+				value: item.value ??
item.text, + checked: item.selected, + onchange: () => { + updateSelected(i); + }, + }) + ); + if (item.selected) { + updateSelected(i); + } + return toggle; + }); + + const container = $el("div.comfy-toggle-switch", elements); + + if (selectedIndex == null) { + elements[0].children[0].checked = true; + updateSelected(0); + } + + return container; +} diff --git a/web/style.css b/web/style.css index 5c1133495d7..44ee6019885 100644 --- a/web/style.css +++ b/web/style.css @@ -121,6 +121,7 @@ body { width: 100%; } +.comfy-toggle-switch, .comfy-btn, .comfy-menu > button, .comfy-menu-btns button, @@ -434,6 +435,43 @@ dialog::backdrop { margin-left: 5px; } +.comfy-toggle-switch { + border-width: 2px; + display: flex; + background-color: var(--comfy-input-bg); + margin: 2px 0; + white-space: nowrap; +} + +.comfy-toggle-switch label { + padding: 2px 0px 3px 6px; + flex: auto; + border-radius: 8px; + align-items: center; + display: flex; + justify-content: center; +} + +.comfy-toggle-switch label:first-child { + border-top-left-radius: 8px; + border-bottom-left-radius: 8px; +} +.comfy-toggle-switch label:last-child { + border-top-right-radius: 8px; + border-bottom-right-radius: 8px; +} + +.comfy-toggle-switch .comfy-toggle-selected { + background-color: var(--comfy-menu-bg); +} + +#extraOptions { + padding: 4px; + background-color: var(--bg-color); + margin-bottom: 4px; + border-radius: 4px; +} + /* Search box */ .litegraph.litesearchbox { From ee2c5fa72d4fc1714576fac7ba64aa5d607303d0 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 16 Jan 2024 13:58:54 +0000 Subject: [PATCH 404/420] Fix renaming upload widget (#2554) * Fix renaming upload widget * Allow custom name --- web/extensions/core/groupNode.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 18b0e6aa612..335bddbb781 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -349,7 +349,7 @@ export class GroupNodeConfig { } if (config[0] === "IMAGEUPLOAD") { if (!extra) extra = {}; - extra.widget = `${prefix}${config[1]?.widget ?? "image"}`; + extra.widget = this.oldToNewWidgetMap[node.index]?.[config[1]?.widget ?? "image"] ?? "image"; } if (extra) { From fad02dc2df0b9c4a0ab84dd1c559197cba752449 Mon Sep 17 00:00:00 2001 From: realazthat Date: Wed, 17 Jan 2024 17:16:34 -0500 Subject: [PATCH 405/420] Don't use PEP 604 type hints, to stay compatible with Python<3.10. --- execution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/execution.py b/execution.py index e91e9a410e0..bc5cfe55c51 100644 --- a/execution.py +++ b/execution.py @@ -751,7 +751,7 @@ def task_done(self, item_id, outputs, if len(self.history) > MAXIMUM_HISTORY_SIZE: self.history.pop(next(iter(self.history))) - status_dict: dict|None = None + status_dict: Optional[dict] = None if status is not None: status_dict = copy.deepcopy(status._asdict()) From d76a04b6ea61306349861a7c4657567507385947 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 17 Jan 2024 19:37:19 -0500 Subject: [PATCH 406/420] Add unfinished ImageOnlyCheckpointSave node to save a SVD checkpoint. This node is unfinished, SVD checkpoints saved with this node will work with ComfyUI but not with anything else. 
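To illustrate the checkpoint assembly this enables: each component's state dict is merged into the single output checkpoint by renaming its keys under that component's standard prefix. Below is a minimal sketch of the idea; the helper mirrors the comfy.utils.state_dict_prefix_replace call used in the diff, but the tensors, keys and exact signature here are illustrative assumptions rather than the shipped implementation.

    import torch

    # Toy stand-in for comfy.utils.state_dict_prefix_replace: rewrite every key
    # that starts with `old` so it starts with `new`; an empty `old` matches all keys.
    def state_dict_prefix_replace(sd, replace_prefix):
        out = {}
        for k, v in sd.items():
            for old, new in replace_prefix.items():
                if k.startswith(old):
                    k = new + k[len(old):]
                    break
            out[k] = v
        return out

    unet_sd = {"input_blocks.0.0.weight": torch.zeros(1)}  # illustrative keys only
    vae_sd = {"encoder.conv_in.weight": torch.zeros(1)}

    checkpoint = {}
    checkpoint.update(state_dict_prefix_replace(unet_sd, {"": "model.diffusion_model."}))
    checkpoint.update(state_dict_prefix_replace(vae_sd, {"": "first_stage_model."}))
    print(sorted(checkpoint))  # keys now carry the standard checkpoint prefixes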
--- comfy/clip_vision.py | 9 +++- comfy/model_base.py | 21 +++++--- comfy/sd.py | 13 +++-- comfy/supported_models_base.py | 6 +++ comfy_extras/nodes_model_merging.py | 83 +++++++++++++++-------------- comfy_extras/nodes_video_model.py | 17 ++++++ 6 files changed, 99 insertions(+), 50 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 4564fcfb2a0..200e1c6eff7 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -1,4 +1,4 @@ -from .utils import load_torch_file, transformers_convert, common_upscale +from .utils import load_torch_file, transformers_convert, common_upscale, state_dict_prefix_replace import os import torch import contextlib @@ -41,9 +41,13 @@ def __init__(self, json_config): self.model.eval() self.patcher = comfy.model_patcher.ModelPatcher(self.model, load_device=self.load_device, offload_device=offload_device) + def load_sd(self, sd): return self.model.load_state_dict(sd, strict=False) + def get_sd(self): + return self.model.state_dict() + def encode_image(self, image): comfy.model_management.load_model_gpu(self.patcher) pixel_values = clip_preprocess(image.to(self.load_device)).float() @@ -76,6 +80,9 @@ def convert_to_transformers(sd, prefix): sd['visual_projection.weight'] = sd.pop("{}proj".format(prefix)).transpose(0, 1) sd = transformers_convert(sd, prefix, "vision_model.", 48) + else: + replace_prefix = {prefix: ""} + sd = state_dict_prefix_replace(sd, replace_prefix) return sd def load_clipvision_from_sd(sd, prefix="", convert_keys=False): diff --git a/comfy/model_base.py b/comfy/model_base.py index b2ea6590fff..847687be461 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -179,19 +179,28 @@ def process_latent_in(self, latent): def process_latent_out(self, latent): return self.latent_format.process_out(latent) - def state_dict_for_saving(self, clip_state_dict, vae_state_dict): - clip_state_dict = self.model_config.process_clip_state_dict_for_saving(clip_state_dict) + def state_dict_for_saving(self, clip_state_dict=None, vae_state_dict=None, clip_vision_state_dict=None): + extra_sds = [] + if clip_state_dict is not None: + extra_sds.append(self.model_config.process_clip_state_dict_for_saving(clip_state_dict)) + if vae_state_dict is not None: + extra_sds.append(self.model_config.process_vae_state_dict_for_saving(vae_state_dict)) + if clip_vision_state_dict is not None: + extra_sds.append(self.model_config.process_clip_vision_state_dict_for_saving(clip_vision_state_dict)) + unet_state_dict = self.diffusion_model.state_dict() unet_state_dict = self.model_config.process_unet_state_dict_for_saving(unet_state_dict) - vae_state_dict = self.model_config.process_vae_state_dict_for_saving(vae_state_dict) + if self.get_dtype() == torch.float16: - clip_state_dict = utils.convert_sd_to(clip_state_dict, torch.float16) - vae_state_dict = utils.convert_sd_to(vae_state_dict, torch.float16) + extra_sds = map(lambda sd: utils.convert_sd_to(sd, torch.float16), extra_sds) if self.model_type == ModelType.V_PREDICTION: unet_state_dict["v_pred"] = torch.tensor([]) - return {**unet_state_dict, **vae_state_dict, **clip_state_dict} + for sd in extra_sds: + unet_state_dict.update(sd) + + return unet_state_dict def set_inpaint(self): self.inpaint_model = True diff --git a/comfy/sd.py b/comfy/sd.py index 1ff25bec630..f49e87b1598 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -534,7 +534,14 @@ def load_unet(unet_path): raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path)) return model -def save_checkpoint(output_path, model, 
clip, vae, metadata=None): - model_management.load_models_gpu([model, clip.load_model()]) - sd = model.model.state_dict_for_saving(clip.get_sd(), vae.get_sd()) +def save_checkpoint(output_path, model, clip=None, vae=None, clip_vision=None, metadata=None): + clip_sd = None + load_models = [model] + if clip is not None: + load_models.append(clip.load_model()) + clip_sd = clip.get_sd() + + model_management.load_models_gpu(load_models) + clip_vision_sd = clip_vision.get_sd() if clip_vision is not None else None + sd = model.model.state_dict_for_saving(clip_sd, vae.get_sd(), clip_vision_sd) comfy.utils.save_torch_file(sd, output_path, metadata=metadata) diff --git a/comfy/supported_models_base.py b/comfy/supported_models_base.py index 49087d23e5d..5baf4bca6c6 100644 --- a/comfy/supported_models_base.py +++ b/comfy/supported_models_base.py @@ -65,6 +65,12 @@ def process_clip_state_dict_for_saving(self, state_dict): replace_prefix = {"": "cond_stage_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) + def process_clip_vision_state_dict_for_saving(self, state_dict): + replace_prefix = {} + if self.clip_vision_prefix is not None: + replace_prefix[""] = self.clip_vision_prefix + return utils.state_dict_prefix_replace(state_dict, replace_prefix) + def process_unet_state_dict_for_saving(self, state_dict): replace_prefix = {"": "model.diffusion_model."} return utils.state_dict_prefix_replace(state_dict, replace_prefix) diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py index dad1dd6378d..d594cf490b6 100644 --- a/comfy_extras/nodes_model_merging.py +++ b/comfy_extras/nodes_model_merging.py @@ -119,6 +119,48 @@ def merge(self, model1, model2, **kwargs): m.add_patches({k: kp[k]}, 1.0 - ratio, ratio) return (m, ) +def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefix=None, output_dir=None, prompt=None, extra_pnginfo=None): + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, output_dir) + prompt_info = "" + if prompt is not None: + prompt_info = json.dumps(prompt) + + metadata = {} + + enable_modelspec = True + if isinstance(model.model, comfy.model_base.SDXL): + metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base" + elif isinstance(model.model, comfy.model_base.SDXLRefiner): + metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner" + else: + enable_modelspec = False + + if enable_modelspec: + metadata["modelspec.sai_model_spec"] = "1.0.0" + metadata["modelspec.implementation"] = "sgm" + metadata["modelspec.title"] = "{} {}".format(filename, counter) + + #TODO: + # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512", + # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h", + # "v2-inpainting" + + if model.model.model_type == comfy.model_base.ModelType.EPS: + metadata["modelspec.predict_key"] = "epsilon" + elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION: + metadata["modelspec.predict_key"] = "v" + + if not args.disable_metadata: + metadata["prompt"] = prompt_info + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata[x] = json.dumps(extra_pnginfo[x]) + + output_checkpoint = f"{filename}_{counter:05}_.safetensors" + output_checkpoint = os.path.join(full_output_folder, output_checkpoint) + + comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, clip_vision, metadata=metadata) + class CheckpointSave: def 
__init__(self): self.output_dir = folder_paths.get_output_directory() @@ -137,46 +179,7 @@ def INPUT_TYPES(s): CATEGORY = "advanced/model_merging" def save(self, model, clip, vae, filename_prefix, prompt=None, extra_pnginfo=None): - full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir) - prompt_info = "" - if prompt is not None: - prompt_info = json.dumps(prompt) - - metadata = {} - - enable_modelspec = True - if isinstance(model.model, comfy.model_base.SDXL): - metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base" - elif isinstance(model.model, comfy.model_base.SDXLRefiner): - metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner" - else: - enable_modelspec = False - - if enable_modelspec: - metadata["modelspec.sai_model_spec"] = "1.0.0" - metadata["modelspec.implementation"] = "sgm" - metadata["modelspec.title"] = "{} {}".format(filename, counter) - - #TODO: - # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512", - # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h", - # "v2-inpainting" - - if model.model.model_type == comfy.model_base.ModelType.EPS: - metadata["modelspec.predict_key"] = "epsilon" - elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION: - metadata["modelspec.predict_key"] = "v" - - if not args.disable_metadata: - metadata["prompt"] = prompt_info - if extra_pnginfo is not None: - for x in extra_pnginfo: - metadata[x] = json.dumps(extra_pnginfo[x]) - - output_checkpoint = f"{filename}_{counter:05}_.safetensors" - output_checkpoint = os.path.join(full_output_folder, output_checkpoint) - - comfy.sd.save_checkpoint(output_checkpoint, model, clip, vae, metadata=metadata) + save_checkpoint(model, clip=clip, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo) return {} class CLIPSave: diff --git a/comfy_extras/nodes_video_model.py b/comfy_extras/nodes_video_model.py index 26a717a3836..a5262565282 100644 --- a/comfy_extras/nodes_video_model.py +++ b/comfy_extras/nodes_video_model.py @@ -3,6 +3,7 @@ import comfy.utils import comfy.sd import folder_paths +import comfy_extras.nodes_model_merging class ImageOnlyCheckpointLoader: @@ -78,10 +79,26 @@ def linear_cfg(args): m.set_model_sampler_cfg_function(linear_cfg) return (m, ) +class ImageOnlyCheckpointSave(comfy_extras.nodes_model_merging.CheckpointSave): + CATEGORY = "_for_testing" + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "clip_vision": ("CLIP_VISION",), + "vae": ("VAE",), + "filename_prefix": ("STRING", {"default": "checkpoints/ComfyUI"}),}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},} + + def save(self, model, clip_vision, vae, filename_prefix, prompt=None, extra_pnginfo=None): + comfy_extras.nodes_model_merging.save_checkpoint(model, clip_vision=clip_vision, vae=vae, filename_prefix=filename_prefix, output_dir=self.output_dir, prompt=prompt, extra_pnginfo=extra_pnginfo) + return {} + NODE_CLASS_MAPPINGS = { "ImageOnlyCheckpointLoader": ImageOnlyCheckpointLoader, "SVD_img2vid_Conditioning": SVD_img2vid_Conditioning, "VideoLinearCFGGuidance": VideoLinearCFGGuidance, + "ImageOnlyCheckpointSave": ImageOnlyCheckpointSave, } NODE_DISPLAY_NAME_MAPPINGS = { From 9fff3c46b44c7dcf9769cb702c7fab9d5ba87e77 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 18 Jan 2024 15:57:35 -0500 Subject: [PATCH 407/420] 
Move some nodes to model_patches section. --- comfy_extras/nodes_freelunch.py | 4 ++-- comfy_extras/nodes_hypertile.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index 7512b841d74..7764aa0b013 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -34,7 +34,7 @@ def INPUT_TYPES(s): RETURN_TYPES = ("MODEL",) FUNCTION = "patch" - CATEGORY = "_for_testing" + CATEGORY = "model_patches" def patch(self, model, b1, b2, s1, s2): model_channels = model.model.model_config.unet_config["model_channels"] @@ -73,7 +73,7 @@ def INPUT_TYPES(s): RETURN_TYPES = ("MODEL",) FUNCTION = "patch" - CATEGORY = "_for_testing" + CATEGORY = "model_patches" def patch(self, model, b1, b2, s1, s2): model_channels = model.model.model_config.unet_config["model_channels"] diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py index 544ae1b22e7..ae55d23dd06 100644 --- a/comfy_extras/nodes_hypertile.py +++ b/comfy_extras/nodes_hypertile.py @@ -32,7 +32,7 @@ def INPUT_TYPES(s): RETURN_TYPES = ("MODEL",) FUNCTION = "patch" - CATEGORY = "_for_testing" + CATEGORY = "model_patches" def patch(self, model, tile_size, swap_size, max_depth, scale_depth): model_channels = model.model.model_config.unet_config["model_channels"] From 78a70fda877acab8e094d11697de51e979c54bc5 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 19 Jan 2024 15:38:05 -0500 Subject: [PATCH 408/420] Remove useless import. --- comfy/ops.py | 1 - 1 file changed, 1 deletion(-) diff --git a/comfy/ops.py b/comfy/ops.py index f6f85de60a1..f674b47f762 100644 --- a/comfy/ops.py +++ b/comfy/ops.py @@ -1,5 +1,4 @@ import torch -from contextlib import contextmanager import comfy.model_management def cast_bias_weight(s, input): From 5823f18a79d46446e07f418b9ede1939e1499ecc Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 19 Jan 2024 23:08:15 -0500 Subject: [PATCH 409/420] Fix for the extracting issue on windows. --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 167214c05c6..0938739215d 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,8 @@ There is a portable standalone build for Windows that should work for running on Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you put your Stable Diffusion checkpoints/models (the huge ckpt/safetensors files) in: ComfyUI\models\checkpoints +If you have trouble extracting it, right click the file -> properties -> unblock + #### How do I share models between another UI and ComfyUI? See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor. 
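As a rough illustration of the config file mentioned above, a minimal extra_model_paths.yaml entry pointing ComfyUI at an existing A1111 install might look like the sketch below. The base_path shown is hypothetical, and the bundled extra_model_paths.yaml.example remains the authoritative reference for section and key names.

    a111:
        base_path: C:/path/to/stable-diffusion-webui/  # hypothetical install location
        checkpoints: models/Stable-diffusion
        vae: models/VAE
        loras: |
            models/Lora
            models/LyCORIS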
From 45bf88d8ef90f3ce77d6fcb8a86a3b0e45f4dd16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristjan=20P=C3=A4rt?= Date: Mon, 22 Jan 2024 04:34:39 +0200 Subject: [PATCH 410/420] Fix queue on change to respect auto queue checkbox (#2608) * Fix render on change not respecting auto queue checkbox Fix issue where autoQueueEnabled checkbox is ignored for changes if autoQueueMode is left on `change` * Make check more specific --- web/scripts/ui.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/scripts/ui.js b/web/scripts/ui.js index 4437345a3ff..d4835c6e445 100644 --- a/web/scripts/ui.js +++ b/web/scripts/ui.js @@ -384,7 +384,7 @@ export class ComfyUI { autoQueueModeEl.style.display = "none"; api.addEventListener("graphChanged", () => { - if (this.autoQueueMode === "change") { + if (this.autoQueueMode === "change" && this.autoQueueEnabled === true) { if (this.lastQueueSize === 0) { this.graphHasChanged = false; app.queuePrompt(0, this.batchCount); From 4871a36458e7cd4af1a7f46dd6738c406e831413 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sun, 21 Jan 2024 21:51:22 -0500 Subject: [PATCH 411/420] Cleanup some unused imports. --- comfy/clip_vision.py | 3 +-- comfy/conds.py | 1 - comfy/controlnet.py | 1 - comfy/diffusers_load.py | 1 - comfy/gligen.py | 2 +- comfy/model_base.py | 1 - comfy/sd.py | 3 --- comfy/sd1_clip.py | 1 - 8 files changed, 2 insertions(+), 11 deletions(-) diff --git a/comfy/clip_vision.py b/comfy/clip_vision.py index 200e1c6eff7..8c77ee7a922 100644 --- a/comfy/clip_vision.py +++ b/comfy/clip_vision.py @@ -1,7 +1,6 @@ -from .utils import load_torch_file, transformers_convert, common_upscale, state_dict_prefix_replace +from .utils import load_torch_file, transformers_convert, state_dict_prefix_replace import os import torch -import contextlib import json import comfy.ops diff --git a/comfy/conds.py b/comfy/conds.py index 6cff2518400..23fa48872d6 100644 --- a/comfy/conds.py +++ b/comfy/conds.py @@ -1,4 +1,3 @@ -import enum import torch import math import comfy.utils diff --git a/comfy/controlnet.py b/comfy/controlnet.py index df474201c2c..82170431ef2 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -1,7 +1,6 @@ import torch import math import os -import contextlib import comfy.utils import comfy.model_management import comfy.model_detection diff --git a/comfy/diffusers_load.py b/comfy/diffusers_load.py index c0b420e7966..98b888a1939 100644 --- a/comfy/diffusers_load.py +++ b/comfy/diffusers_load.py @@ -1,4 +1,3 @@ -import json import os import comfy.sd diff --git a/comfy/gligen.py b/comfy/gligen.py index 8d182839e05..71892dfb1d4 100644 --- a/comfy/gligen.py +++ b/comfy/gligen.py @@ -1,5 +1,5 @@ import torch -from torch import nn, einsum +from torch import nn from .ldm.modules.attention import CrossAttention from inspect import isfunction diff --git a/comfy/model_base.py b/comfy/model_base.py index 847687be461..8a843a98c39 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -6,7 +6,6 @@ import comfy.conds import comfy.ops from enum import Enum -import contextlib from . 
import utils class ModelType(Enum): diff --git a/comfy/sd.py b/comfy/sd.py index f49e87b1598..9ca9d1d1209 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -1,9 +1,6 @@ import torch -import contextlib -import math from comfy import model_management -from .ldm.util import instantiate_from_config from .ldm.models.autoencoder import AutoencoderKL, AutoencodingEngine import yaml diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py index 6ffef515ede..65ea909febc 100644 --- a/comfy/sd1_clip.py +++ b/comfy/sd1_clip.py @@ -6,7 +6,6 @@ import traceback import zipfile from . import model_management -import contextlib import comfy.clip_model import json From f2d432f9a754f2bacb85a70f7e2b817e6dba44f8 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 22 Jan 2024 00:28:13 -0500 Subject: [PATCH 412/420] Fix potential turbo scheduler model patching issue. --- comfy_extras/nodes_custom_sampler.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_custom_sampler.py b/comfy_extras/nodes_custom_sampler.py index f02cb5ef788..99f9ea7dcef 100644 --- a/comfy_extras/nodes_custom_sampler.py +++ b/comfy_extras/nodes_custom_sampler.py @@ -105,9 +105,8 @@ def INPUT_TYPES(s): def get_sigmas(self, model, steps, denoise): start_step = 10 - int(10 * denoise) timesteps = torch.flip(torch.arange(1, 11) * 100 - 1, (0,))[start_step:start_step + steps] - inner_model = model.patch_model(patch_weights=False) - sigmas = inner_model.model_sampling.sigma(timesteps) - model.unpatch_model() + comfy.model_management.load_models_gpu([model]) + sigmas = model.model.model_sampling.sigma(timesteps) sigmas = torch.cat([sigmas, sigmas.new_zeros([1])]) return (sigmas, ) From 05cd00695a84cebd5603a31f665eb7301fba2beb Mon Sep 17 00:00:00 2001 From: "Dr.Lt.Data" <128333288+ltdrdata@users.noreply.github.com> Date: Tue, 23 Jan 2024 17:47:01 +0900 Subject: [PATCH 413/420] typo fix - calculate_sigmas_scheduler (#2619) self.scheduler -> scheduler_name Co-authored-by: Lt.Dr.Data --- comfy/samplers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 89d8d4f2821..66a3feb10d7 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -639,7 +639,7 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps): elif scheduler_name == "sgm_uniform": sigmas = normal_scheduler(model, steps, sgm=True) else: - print("error invalid scheduler", self.scheduler) + print("error invalid scheduler", scheduler_name) return sigmas def sampler_object(name): From 3762e676a93fcfb3ebdf6baf845971af72d4daf8 Mon Sep 17 00:00:00 2001 From: pythongosssss <125205205+pythongosssss@users.noreply.github.com> Date: Tue, 23 Jan 2024 19:15:52 +0000 Subject: [PATCH 414/420] Support refresh on group node combos (#2625) * Support refresh on group node combos * fix check --- web/extensions/core/groupNode.js | 28 ++++++++++++++++++++++++++++ web/scripts/app.js | 2 ++ 2 files changed, 30 insertions(+) diff --git a/web/extensions/core/groupNode.js b/web/extensions/core/groupNode.js index 335bddbb781..0f041fcd2f9 100644 --- a/web/extensions/core/groupNode.js +++ b/web/extensions/core/groupNode.js @@ -966,6 +966,26 @@ export class GroupNodeHandler { api.removeEventListener("executing", executing); api.removeEventListener("executed", executed); }; + + this.node.refreshComboInNode = (defs) => { + // Update combo widget options + for (const widgetName in this.groupData.newToOldWidgetMap) { + const widget = this.node.widgets.find((w) => w.name === widgetName); + if (widget?.type === "combo") { + 
const old = this.groupData.newToOldWidgetMap[widgetName]; + const def = defs[old.node.type]; + const input = def?.input?.required?.[old.inputName] ?? def?.input?.optional?.[old.inputName]; + if (!input) continue; + + widget.options.values = input[0]; + + if (old.inputName !== "image" && !widget.options.values.includes(widget.value)) { + widget.value = widget.options.values[0]; + widget.callback(widget.value); + } + } + } + }; } updateInnerWidgets() { @@ -1245,6 +1265,14 @@ const ext = { node[GROUP] = new GroupNodeHandler(node); } }, + async refreshComboInNodes(defs) { + // Re-register group nodes so new ones are created with the correct options + Object.assign(globalDefs, defs); + const nodes = app.graph.extra?.groupNodes; + if (nodes) { + await GroupNodeConfig.registerFromWorkflow(nodes, {}); + } + } }; app.registerExtension(ext); diff --git a/web/scripts/app.js b/web/scripts/app.js index a2cd1e31627..6df393ba60d 100644 --- a/web/scripts/app.js +++ b/web/scripts/app.js @@ -2212,6 +2212,8 @@ export class ComfyApp { } } } + + await this.#invokeExtensionsAsync("refreshComboInNodes", defs); } /** From b9911dcb2f48273479871d018823e11d03642d92 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 23 Jan 2024 20:01:37 -0500 Subject: [PATCH 415/420] Sync litegraph with repo. https://github.com/comfyanonymous/litegraph.js/pull/4 --- web/lib/litegraph.core.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/lib/litegraph.core.js b/web/lib/litegraph.core.js index 434c4a83bf1..080e0ef47da 100644 --- a/web/lib/litegraph.core.js +++ b/web/lib/litegraph.core.js @@ -11496,7 +11496,7 @@ LGraphNode.prototype.executeAction = function(action) } timeout_close = setTimeout(function() { dialog.close(); - }, 500); + }, typeof options.hide_on_mouse_leave === "number" ? options.hide_on_mouse_leave : 500); }); // if filtering, check focus changed to comboboxes and prevent closing if (options.do_type_filter){ From d1533d9c0f1dde192f738ef1b745b15f49f41e02 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 24 Jan 2024 09:49:57 -0500 Subject: [PATCH 416/420] Add experimental photomaker nodes. Put the model file in models/photomaker and use PhotoMakerLoader. 
Then use PhotoMakerEncode with the keyword "photomaker" to apply the image --- comfy_extras/nodes_photomaker.py | 187 +++++++++++++++++++ folder_paths.py | 2 + models/photomaker/put_photomaker_models_here | 0 nodes.py | 1 + 4 files changed, 190 insertions(+) create mode 100644 comfy_extras/nodes_photomaker.py create mode 100644 models/photomaker/put_photomaker_models_here diff --git a/comfy_extras/nodes_photomaker.py b/comfy_extras/nodes_photomaker.py new file mode 100644 index 00000000000..90130142b28 --- /dev/null +++ b/comfy_extras/nodes_photomaker.py @@ -0,0 +1,187 @@ +import torch +import torch.nn as nn +import folder_paths +import comfy.clip_model +import comfy.clip_vision +import comfy.ops + +# code for model from: https://github.com/TencentARC/PhotoMaker/blob/main/photomaker/model.py under Apache License Version 2.0 +VISION_CONFIG_DICT = { + "hidden_size": 1024, + "image_size": 224, + "intermediate_size": 4096, + "num_attention_heads": 16, + "num_channels": 3, + "num_hidden_layers": 24, + "patch_size": 14, + "projection_dim": 768, + "hidden_act": "quick_gelu", +} + +class MLP(nn.Module): + def __init__(self, in_dim, out_dim, hidden_dim, use_residual=True, operations=comfy.ops): + super().__init__() + if use_residual: + assert in_dim == out_dim + self.layernorm = operations.LayerNorm(in_dim) + self.fc1 = operations.Linear(in_dim, hidden_dim) + self.fc2 = operations.Linear(hidden_dim, out_dim) + self.use_residual = use_residual + self.act_fn = nn.GELU() + + def forward(self, x): + residual = x + x = self.layernorm(x) + x = self.fc1(x) + x = self.act_fn(x) + x = self.fc2(x) + if self.use_residual: + x = x + residual + return x + + +class FuseModule(nn.Module): + def __init__(self, embed_dim, operations): + super().__init__() + self.mlp1 = MLP(embed_dim * 2, embed_dim, embed_dim, use_residual=False, operations=operations) + self.mlp2 = MLP(embed_dim, embed_dim, embed_dim, use_residual=True, operations=operations) + self.layer_norm = operations.LayerNorm(embed_dim) + + def fuse_fn(self, prompt_embeds, id_embeds): + stacked_id_embeds = torch.cat([prompt_embeds, id_embeds], dim=-1) + stacked_id_embeds = self.mlp1(stacked_id_embeds) + prompt_embeds + stacked_id_embeds = self.mlp2(stacked_id_embeds) + stacked_id_embeds = self.layer_norm(stacked_id_embeds) + return stacked_id_embeds + + def forward( + self, + prompt_embeds, + id_embeds, + class_tokens_mask, + ) -> torch.Tensor: + # id_embeds shape: [b, max_num_inputs, 1, 2048] + id_embeds = id_embeds.to(prompt_embeds.dtype) + num_inputs = class_tokens_mask.sum().unsqueeze(0) # TODO: check for training case + batch_size, max_num_inputs = id_embeds.shape[:2] + # seq_length: 77 + seq_length = prompt_embeds.shape[1] + # flat_id_embeds shape: [b*max_num_inputs, 1, 2048] + flat_id_embeds = id_embeds.view( + -1, id_embeds.shape[-2], id_embeds.shape[-1] + ) + # valid_id_mask [b*max_num_inputs] + valid_id_mask = ( + torch.arange(max_num_inputs, device=flat_id_embeds.device)[None, :] + < num_inputs[:, None] + ) + valid_id_embeds = flat_id_embeds[valid_id_mask.flatten()] + + prompt_embeds = prompt_embeds.view(-1, prompt_embeds.shape[-1]) + class_tokens_mask = class_tokens_mask.view(-1) + valid_id_embeds = valid_id_embeds.view(-1, valid_id_embeds.shape[-1]) + # slice out the image token embeddings + image_token_embeds = prompt_embeds[class_tokens_mask] + stacked_id_embeds = self.fuse_fn(image_token_embeds, valid_id_embeds) + assert class_tokens_mask.sum() == stacked_id_embeds.shape[0], f"{class_tokens_mask.sum()} != {stacked_id_embeds.shape[0]}" + 
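+        # Write the fused embeddings back into the class-token positions of the
+        # flattened prompt embeddings (an in-place masked scatter).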
prompt_embeds.masked_scatter_(class_tokens_mask[:, None], stacked_id_embeds.to(prompt_embeds.dtype)) + updated_prompt_embeds = prompt_embeds.view(batch_size, seq_length, -1) + return updated_prompt_embeds + +class PhotoMakerIDEncoder(comfy.clip_model.CLIPVisionModelProjection): + def __init__(self): + self.load_device = comfy.model_management.text_encoder_device() + offload_device = comfy.model_management.text_encoder_offload_device() + dtype = comfy.model_management.text_encoder_dtype(self.load_device) + + super().__init__(VISION_CONFIG_DICT, dtype, offload_device, comfy.ops.manual_cast) + self.visual_projection_2 = comfy.ops.manual_cast.Linear(1024, 1280, bias=False) + self.fuse_module = FuseModule(2048, comfy.ops.manual_cast) + + def forward(self, id_pixel_values, prompt_embeds, class_tokens_mask): + b, num_inputs, c, h, w = id_pixel_values.shape + id_pixel_values = id_pixel_values.view(b * num_inputs, c, h, w) + + shared_id_embeds = self.vision_model(id_pixel_values)[2] + id_embeds = self.visual_projection(shared_id_embeds) + id_embeds_2 = self.visual_projection_2(shared_id_embeds) + + id_embeds = id_embeds.view(b, num_inputs, 1, -1) + id_embeds_2 = id_embeds_2.view(b, num_inputs, 1, -1) + + id_embeds = torch.cat((id_embeds, id_embeds_2), dim=-1) + updated_prompt_embeds = self.fuse_module(prompt_embeds, id_embeds, class_tokens_mask) + + return updated_prompt_embeds + + +class PhotoMakerLoader: + @classmethod + def INPUT_TYPES(s): + return {"required": { "photomaker_model_name": (folder_paths.get_filename_list("photomaker"), )}} + + RETURN_TYPES = ("PHOTOMAKER",) + FUNCTION = "load_photomaker_model" + + CATEGORY = "_for_testing/photomaker" + + def load_photomaker_model(self, photomaker_model_name): + photomaker_model_path = folder_paths.get_full_path("photomaker", photomaker_model_name) + photomaker_model = PhotoMakerIDEncoder() + data = comfy.utils.load_torch_file(photomaker_model_path, safe_load=True) + if "id_encoder" in data: + data = data["id_encoder"] + photomaker_model.load_state_dict(data) + return (photomaker_model,) + + +class PhotoMakerEncode: + @classmethod + def INPUT_TYPES(s): + return {"required": { "photomaker": ("PHOTOMAKER",), + "image": ("IMAGE",), + "clip": ("CLIP", ), + "text": ("STRING", {"multiline": True, "default": "photograph of photomaker"}), + }} + + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "apply_photomaker" + + CATEGORY = "_for_testing/photomaker" + + def apply_photomaker(self, photomaker, image, clip, text): + special_token = "photomaker" + pixel_values = comfy.clip_vision.clip_preprocess(image.to(photomaker.load_device)).float() + try: + index = text.split(" ").index(special_token) + 1 + except ValueError: + index = -1 + tokens = clip.tokenize(text, return_word_ids=True) + out_tokens = {} + for k in tokens: + out_tokens[k] = [] + for t in tokens[k]: + f = list(filter(lambda x: x[2] != index, t)) + while len(f) < len(t): + f.append(t[-1]) + out_tokens[k].append(f) + + cond, pooled = clip.encode_from_tokens(out_tokens, return_pooled=True) + + if index > 0: + token_index = index - 1 + num_id_images = 1 + class_tokens_mask = [True if token_index <= i < token_index+num_id_images else False for i in range(77)] + out = photomaker(id_pixel_values=pixel_values.unsqueeze(0), prompt_embeds=cond.to(photomaker.load_device), + class_tokens_mask=torch.tensor(class_tokens_mask, dtype=torch.bool, device=photomaker.load_device).unsqueeze(0)) + else: + out = cond + + return ([[out, {"pooled_output": pooled}]], ) + + +NODE_CLASS_MAPPINGS = { + "PhotoMakerLoader": 
PhotoMakerLoader, + "PhotoMakerEncode": PhotoMakerEncode, +} + diff --git a/folder_paths.py b/folder_paths.py index ef9b8ccfaa5..f1bf40f8c04 100644 --- a/folder_paths.py +++ b/folder_paths.py @@ -29,6 +29,8 @@ folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetworks")], supported_pt_extensions) +folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")], supported_pt_extensions) + folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""}) output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output") diff --git a/models/photomaker/put_photomaker_models_here b/models/photomaker/put_photomaker_models_here new file mode 100644 index 00000000000..e69de29bb2d diff --git a/nodes.py b/nodes.py index 6c7317b69b1..4ad35f79b5a 100644 --- a/nodes.py +++ b/nodes.py @@ -1943,6 +1943,7 @@ def init_custom_nodes(): "nodes_perpneg.py", "nodes_stable3d.py", "nodes_sdupscale.py", + "nodes_photomaker.py", ] for node_file in extras_files: From 89507f8adff4aff4507b6f35a67717badaecd4ac Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Thu, 25 Jan 2024 21:09:51 -0500 Subject: [PATCH 417/420] Remove some unused imports. --- comfy/ldm/modules/attention.py | 3 --- comfy/ldm/modules/diffusionmodules/openaimodel.py | 3 --- comfy/samplers.py | 4 ---- 3 files changed, 10 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index fd8888d0ee3..9c9cb761dd7 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -1,12 +1,9 @@ -from inspect import isfunction import math import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat from typing import Optional, Any -from functools import partial - from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding from .sub_quadratic_attention import efficient_dot_product_attention diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index ea936e06623..998afd977ca 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -1,12 +1,9 @@ from abc import abstractmethod -import math -import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F from einops import rearrange -from functools import partial from .util import ( checkpoint, diff --git a/comfy/samplers.py b/comfy/samplers.py index 66a3feb10d7..f4c3e268f73 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -1,13 +1,9 @@ from .k_diffusion import sampling as k_diffusion_sampling from .extra_samplers import uni_pc import torch -import enum import collections from comfy import model_management import math -from comfy import model_base -import comfy.utils -import comfy.conds def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) From 2d105066df97a283fa155e39e0cc34ebbe58f55f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 26 Jan 2024 21:31:13 -0500 Subject: [PATCH 418/420] Cleanups. 
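This drops the now-unused os, json, and gc imports from execution.py.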
--- execution.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/execution.py b/execution.py index bc5cfe55c51..00908eadd46 100644 --- a/execution.py +++ b/execution.py @@ -1,12 +1,9 @@ -import os import sys import copy -import json import logging import threading import heapq import traceback -import gc import inspect from typing import List, Literal, NamedTuple, Optional From fc196aac80fd4bf6c8a39d85d1e809902871cade Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Fri, 26 Jan 2024 23:13:02 -0500 Subject: [PATCH 419/420] Add a LatentBatchSeedBehavior node. This lets you set it so the latents can use the same seed for the sampling on every image in the batch. --- comfy_extras/nodes_latent.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py index 2eefc4c555d..b7fd8cd687f 100644 --- a/comfy_extras/nodes_latent.py +++ b/comfy_extras/nodes_latent.py @@ -122,10 +122,34 @@ def batch(self, samples1, samples2): samples_out["batch_index"] = samples1.get("batch_index", [x for x in range(0, s1.shape[0])]) + samples2.get("batch_index", [x for x in range(0, s2.shape[0])]) return (samples_out,) +class LatentBatchSeedBehavior: + @classmethod + def INPUT_TYPES(s): + return {"required": { "samples": ("LATENT",), + "seed_behavior": (["random", "fixed"],),}} + + RETURN_TYPES = ("LATENT",) + FUNCTION = "op" + + CATEGORY = "latent/advanced" + + def op(self, samples, seed_behavior): + samples_out = samples.copy() + latent = samples["samples"] + if seed_behavior == "random": + if 'batch_index' in samples_out: + samples_out.pop('batch_index') + elif seed_behavior == "fixed": + batch_number = samples_out.get("batch_index", [0])[0] + samples_out["batch_index"] = [batch_number] * latent.shape[0] + + return (samples_out,) + NODE_CLASS_MAPPINGS = { "LatentAdd": LatentAdd, "LatentSubtract": LatentSubtract, "LatentMultiply": LatentMultiply, "LatentInterpolate": LatentInterpolate, "LatentBatch": LatentBatch, + "LatentBatchSeedBehavior": LatentBatchSeedBehavior, } From 7f4725f6b3f72dd8bdb60dae5dd2c3e943263bcf Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 27 Jan 2024 02:51:27 -0500 Subject: [PATCH 420/420] Fix some issues with --gpu-only --- comfy_extras/nodes_post_processing.py | 1 + comfy_extras/nodes_stable3d.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/comfy_extras/nodes_post_processing.py b/comfy_extras/nodes_post_processing.py index 71660f8a525..cb5c7d22817 100644 --- a/comfy_extras/nodes_post_processing.py +++ b/comfy_extras/nodes_post_processing.py @@ -33,6 +33,7 @@ def INPUT_TYPES(s): CATEGORY = "image/postprocessing" def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str): + image2 = image2.to(image1.device) if image1.shape != image2.shape: image2 = image2.permute(0, 3, 1, 2) image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center') diff --git a/comfy_extras/nodes_stable3d.py b/comfy_extras/nodes_stable3d.py index e02a9875a97..4375d8f960e 100644 --- a/comfy_extras/nodes_stable3d.py +++ b/comfy_extras/nodes_stable3d.py @@ -46,7 +46,7 @@ def encode(self, clip_vision, init_image, vae, width, height, batch_size, elevat encode_pixels = pixels[:,:,:,:3] t = vae.encode(encode_pixels) cam_embeds = camera_embeddings(elevation, azimuth) - cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) + cond = torch.cat([pooled, 
cam_embeds.to(pooled.device).repeat((pooled.shape[0], 1, 1))], dim=-1) positive = [[cond, {"concat_latent_image": t}]] negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t)}]]
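
For context on the fix above, a minimal sketch, using illustrative stand-in tensors rather than the real model outputs, of the device mismatch the added .to(...) calls guard against when --gpu-only keeps everything resident on the GPU:

import torch

if torch.cuda.is_available():
    pooled = torch.randn(1, 4, device="cuda")  # stand-in for a pooled output already on the GPU
    embeds = torch.randn(1, 4)                 # stand-in for embeddings produced on the CPU
    # torch.cat across devices raises a RuntimeError; moving the CPU tensor
    # over first, as both hunks above do, avoids it.
    cond = torch.cat((pooled, embeds.to(pooled.device)), dim=-1)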