diff --git a/comfy/hooks.py b/comfy/hooks.py
index 9d073107290..5765ad85c03 100644
--- a/comfy/hooks.py
+++ b/comfy/hooks.py
@@ -575,7 +575,7 @@ def get_sorted_list_via_attr(objects: list, attr: str) -> list:
     unique_attrs = {}
     for o in objects:
         val_attr = getattr(o, attr)
-        attr_list: list = unique_attrs.get(val_attr, list())
+        attr_list: list = unique_attrs.get(val_attr, [])
         attr_list.append(o)
         if val_attr not in unique_attrs:
             unique_attrs[val_attr] = attr_list
diff --git a/comfy/ldm/cosmos/model.py b/comfy/ldm/cosmos/model.py
index 05dd3846960..b8135ea92b4 100644
--- a/comfy/ldm/cosmos/model.py
+++ b/comfy/ldm/cosmos/model.py
@@ -217,19 +217,19 @@ def build_pos_embed(self, device=None):
             raise ValueError(f"Unknown pos_emb_cls {self.pos_emb_cls}")

         logging.debug(f"Building positional embedding with {self.pos_emb_cls} class, impl {cls_type}")
-        kwargs = dict(
-            model_channels=self.model_channels,
-            len_h=self.max_img_h // self.patch_spatial,
-            len_w=self.max_img_w // self.patch_spatial,
-            len_t=self.max_frames // self.patch_temporal,
-            is_learnable=self.pos_emb_learnable,
-            interpolation=self.pos_emb_interpolation,
-            head_dim=self.model_channels // self.num_heads,
-            h_extrapolation_ratio=self.rope_h_extrapolation_ratio,
-            w_extrapolation_ratio=self.rope_w_extrapolation_ratio,
-            t_extrapolation_ratio=self.rope_t_extrapolation_ratio,
-            device=device,
-        )
+        kwargs = {
+            "model_channels": self.model_channels,
+            "len_h": self.max_img_h // self.patch_spatial,
+            "len_w": self.max_img_w // self.patch_spatial,
+            "len_t": self.max_frames // self.patch_temporal,
+            "is_learnable": self.pos_emb_learnable,
+            "interpolation": self.pos_emb_interpolation,
+            "head_dim": self.model_channels // self.num_heads,
+            "h_extrapolation_ratio": self.rope_h_extrapolation_ratio,
+            "w_extrapolation_ratio": self.rope_w_extrapolation_ratio,
+            "t_extrapolation_ratio": self.rope_t_extrapolation_ratio,
+            "device": device,
+        }
         self.pos_embedder = cls_type(
             **kwargs,
         )
diff --git a/comfy/ldm/models/autoencoder.py b/comfy/ldm/models/autoencoder.py
index e6493155ef7..4aef9e894b2 100644
--- a/comfy/ldm/models/autoencoder.py
+++ b/comfy/ldm/models/autoencoder.py
@@ -19,7 +19,7 @@ def get_trainable_parameters(self) -> Any:
         yield from ()

     def forward(self, z: torch.Tensor) -> Tuple[torch.Tensor, dict]:
-        log = dict()
+        log = {}
         posterior = DiagonalGaussianDistribution(z)
         if self.sample:
             z = posterior.sample()
@@ -88,7 +88,7 @@ def decode(self, *args, **kwargs) -> torch.Tensor:
     def instantiate_optimizer_from_config(self, params, lr, cfg):
         logging.info(f"loading >>> {cfg['target']} <<< optimizer from config")
         return get_obj_from_str(cfg["target"])(
-            params, lr=lr, **cfg.get("params", dict())
+            params, lr=lr, **cfg.get("params", {})
         )

     def configure_optimizers(self) -> Any:
@@ -129,7 +129,7 @@ def encode(
     ) -> Union[torch.Tensor, Tuple[torch.Tensor, dict]]:
         z = self.encoder(x)
         if unregularized:
-            return z, dict()
+            return z, {}
         z, reg_log = self.regularization(z)
         if return_reg_log:
             return z, reg_log
@@ -191,7 +191,7 @@ def encode(
             N = x.shape[0]
             bs = self.max_batch_size
             n_batches = int(math.ceil(N / bs))
-            z = list()
+            z = []
             for i_batch in range(n_batches):
                 z_batch = self.encoder(x[i_batch * bs : (i_batch + 1) * bs])
                 z_batch = self.quant_conv(z_batch)
@@ -211,7 +211,7 @@ def decode(self, z: torch.Tensor, **decoder_kwargs) -> torch.Tensor:
             N = z.shape[0]
             bs = self.max_batch_size
             n_batches = int(math.ceil(N / bs))
-            dec = list()
+            dec = []
             for i_batch in range(n_batches):
                 dec_batch = self.post_quant_conv(z[i_batch * bs : (i_batch + 1) * bs])
                 dec_batch = self.decoder(dec_batch, **decoder_kwargs)
diff --git a/comfy/ldm/util.py b/comfy/ldm/util.py
index 30b4b472105..ebbf59d7bf1 100644
--- a/comfy/ldm/util.py
+++ b/comfy/ldm/util.py
@@ -13,7 +13,7 @@ def log_txt_as_img(wh, xc, size=10):
     # wh a tuple of (width, height)
     # xc a list of captions to plot
     b = len(xc)
-    txts = list()
+    txts = []
     for bi in range(b):
         txt = Image.new("RGB", wh, color="white")
         draw = ImageDraw.Draw(txt)
@@ -77,7 +77,7 @@ def instantiate_from_config(config):
         elif config == "__is_unconditional__":
             return None
         raise KeyError("Expected key `target` to instantiate.")
-    return get_obj_from_str(config["target"])(**config.get("params", dict()))
+    return get_obj_from_str(config["target"])(**config.get("params", {}))


 def get_obj_from_str(string, reload=False):
@@ -106,9 +106,9 @@ def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8, # TODO: che
             raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
         if not 0.0 <= ema_decay <= 1.0:
             raise ValueError("Invalid ema_decay value: {}".format(ema_decay))
-        defaults = dict(lr=lr, betas=betas, eps=eps,
-                        weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay,
-                        ema_power=ema_power, param_names=param_names)
+        defaults = {"lr": lr, "betas": betas, "eps": eps,
+                    "weight_decay": weight_decay, "amsgrad": amsgrad, "ema_decay": ema_decay,
+                    "ema_power": ema_power, "param_names": param_names}
         super().__init__(params, defaults)

     def __setstate__(self, state):
diff --git a/comfy/samplers.py b/comfy/samplers.py
index 5cc33a7d9f6..728d56c71b0 100644
--- a/comfy/samplers.py
+++ b/comfy/samplers.py
@@ -185,7 +185,7 @@ def finalize_default_conds(model: 'BaseModel', hooked_to_run: dict[comfy.hooks.H
             p = p._replace(mult=mult)
             if p.hooks is not None:
                 model.current_patcher.prepare_hook_patches_current_keyframe(timestep, p.hooks, model_options)
-            hooked_to_run.setdefault(p.hooks, list())
+            hooked_to_run.setdefault(p.hooks, [])
             hooked_to_run[p.hooks] += [(p, i)]

 def calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Tensor, timestep, model_options):
@@ -220,7 +220,7 @@ def _calc_cond_batch(model: 'BaseModel', conds: list[list[dict]], x_in: torch.Te
                     continue
                 if p.hooks is not None:
                     model.current_patcher.prepare_hook_patches_current_keyframe(timestep, p.hooks, model_options)
-                hooked_to_run.setdefault(p.hooks, list())
+                hooked_to_run.setdefault(p.hooks, [])
                 hooked_to_run[p.hooks] += [(p, i)]
         default_conds.append(default_c)

diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 95d41c30fcc..0923615c9c2 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -26,7 +26,7 @@ def gen_empty_tokens(special_tokens, length):

 class ClipTokenWeightEncoder:
     def encode_token_weights(self, token_weight_pairs):
-        to_encode = list()
+        to_encode = []
         max_token_len = 0
         has_weights = False
         for x in token_weight_pairs:
diff --git a/comfy_extras/nodes_audio.py b/comfy_extras/nodes_audio.py
index 3cb918e09d3..5bf04abe7b9 100644
--- a/comfy_extras/nodes_audio.py
+++ b/comfy_extras/nodes_audio.py
@@ -164,7 +164,7 @@ def INPUT_TYPES(s):
     def save_audio(self, audio, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
         filename_prefix += self.prefix_append
         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
-        results = list()
+        results = []

         metadata = {}
         if not args.disable_metadata:
diff --git a/comfy_extras/nodes_images.py b/comfy_extras/nodes_images.py
index af37666b29f..85dba641e92 100644
--- a/comfy_extras/nodes_images.py
+++ b/comfy_extras/nodes_images.py
@@ -99,7 +99,7 @@ def save_images(self, images, fps, filename_prefix, lossless, quality, method, n
         method = self.methods.get(method)
         filename_prefix += self.prefix_append
         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
-        results = list()
+        results = []
         pil_images = []
         for image in images:
             i = 255. * image.cpu().numpy()
@@ -160,7 +160,7 @@ def INPUT_TYPES(s):
     def save_images(self, images, fps, compress_level, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
         filename_prefix += self.prefix_append
         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
-        results = list()
+        results = []
         pil_images = []
         for image in images:
             i = 255. * image.cpu().numpy()
diff --git a/execution.py b/execution.py
index 2c979205ba9..a145350a99f 100644
--- a/execution.py
+++ b/execution.py
@@ -232,7 +232,7 @@ def get_output_data(obj, input_data_all, execution_block_cb=None, pre_execute_cb
             output = merge_result_data(results, obj)
         else:
             output = []
-    ui = dict()
+    ui = {}
     if len(uis) > 0:
         ui = {k: [y for x in uis for y in x[k]] for k in uis[0].keys()}
     return output, ui, has_subgraph
diff --git a/nodes.py b/nodes.py
index cfd7dd8a453..009c777aa15 100644
--- a/nodes.py
+++ b/nodes.py
@@ -477,7 +477,7 @@ def save(self, samples, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=No

         file = f"{filename}_{counter:05}_.latent"

-        results = list()
+        results = []
         results.append({
             "filename": file,
             "subfolder": subfolder,
@@ -1596,7 +1596,7 @@ def INPUT_TYPES(s):
     def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
         filename_prefix += self.prefix_append
         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
-        results = list()
+        results = []
         for (batch_number, image) in enumerate(images):
             i = 255. * image.cpu().numpy()
             img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
diff --git a/pyproject.toml b/pyproject.toml
index b747d6ef78c..296bc45de3d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,5 +19,6 @@ lint.select = [
   # The "F" series in Ruff stands for "Pyflakes" rules, which catch various Python syntax errors and undefined names.
   # See all rules here: https://docs.astral.sh/ruff/rules/#pyflakes-f
   "F",
+  "C408", # unnecessary dict(), list() or tuple() calls that can be rewritten as empty literals.
 ]
 exclude = ["*.ipynb"]
diff --git a/server.py b/server.py
index bae898ef5d6..b58ed748158 100644
--- a/server.py
+++ b/server.py
@@ -157,7 +157,7 @@ def __init__(self, loop):

         max_upload_size = round(args.max_upload_size * 1024 * 1024)
         self.app = web.Application(client_max_size=max_upload_size, middlewares=middlewares)
-        self.sockets = dict()
+        self.sockets = {}
         self.web_root = (
             FrontendManager.init_frontend(args.front_end_version)
             if args.front_end_root is None
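
For context on the new lint setting: "C408" is Ruff's unnecessary-collection-call check, taken from the flake8-comprehensions rule set. It flags dict(), list(), and tuple() calls that can be rewritten as literals, which is exactly the pattern the Python hunks above clean up. A minimal sketch of what the rule flags and the literal forms it prefers (the variable names here are illustrative only, not taken from the codebase):

    # Flagged by C408: collection constructors used where a literal would do
    log = dict()
    results = list()
    defaults = dict(lr=1e-3, eps=1e-8)

    # Preferred literal equivalents, matching the changes in this diff
    log = {}
    results = []
    defaults = {"lr": 1e-3, "eps": 1e-8}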