You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Hi, after the last update, I am no longer able to generate images:
every time I try to generate an image (no matter the size) I get "There is not enough GPU video memory available!", which is really strange because I have always been able to generate images without using any launch arguments.
I am currently using an RX 590 8GB.
The error I get is the following:
Error completing request██████████▍ | 4/20 [00:08<00:35, 2.23s/it] Arguments: ('task(5sosf4qb01nxeoo)', 'beautiful girl', '', [], 20, 0, False, False, 1, 1, 7, -1.0, -1.0, 0, 0, 0, False, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 0, '', '', [], 0, False, False, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, None, 'Refresh models', <controlnet.py.UiControlNetUnit object at 0x000002B7B0B5D690>, False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, None, None, False, 50) {} Traceback (most recent call last): File "E:\stable diffusion\stable-diffusion-webui-directml\modules\call_queue.py", line 57, in f res = list(func(*args, **kwargs)) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\call_queue.py", line 37, in f res = func(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\txt2img.py", line 57, in txt2img processed = processing.process_images(p) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 611, in process_images res = process_images_inner(p) File "E:\stable diffusion\stable-diffusion-webui-directml\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 729, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 977, in sample samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) File "E:\stable 
diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 383, in sample samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 257, in launch_sampling return func() File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 383, in <lambda> samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral denoised = model(x, sigmas[i] * s_in, **extra_args) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 137, in forward x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in)) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps return self.inner_model.apply_model(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_utils.py", line 17, in <lambda> setattr(resolved_obj, func_path[-1], lambda *args, 
**kwargs: self(*args, **kwargs)) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_utils.py", line 28, in __call__ return self.__orig_func(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model x_recon = self.model(x_noisy, t, **cond) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward out = self.diffusion_model(x, t, context=cc) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 802, in forward h = module(h, emb, context) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 84, in forward x = layer(x, context) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 334, in forward x = block(x, context=context[i]) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, 
**kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 269, in forward return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 121, in checkpoint return CheckpointFunction.apply(func, len(inputs), *args) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\autograd\function.py", line 506, in apply return super().apply(*args, **kwargs) # type: ignore[misc] File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 136, in forward output_tensors = ctx.run_function(*ctx.input_tensors) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 272, in _forward x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 365, in split_cross_attention_forward_invokeAI r = einsum_op(q, k, v) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 340, in einsum_op return einsum_op_dml(q, k, v) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 328, in einsum_op_dml return einsum_op_tensor_mem(q, k, v, (mem_reserved - mem_active) if mem_reserved > mem_active else 1) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 313, in einsum_op_tensor_mem return einsum_op_slice_1(q, k, v, max(q.shape[1] // 
div, 1)) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 288, in einsum_op_slice_1 r[:, i:end] = einsum_op_compvis(q[:, i:end], k, v) RuntimeError: Could not allocate tensor with 4915840 bytes. There is not enough GPU video memory available!
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
Hi, after the last update, I am no longer able to generate images:
every time I try to generate an image (no matter the size) I get "There is not enough GPU video memory available!", which is really strange because I have always been able to generate images without using any launch arguments.
I am currently using an RX 590 8GB.
The error I get is the following:
Error completing request██████████▍ | 4/20 [00:08<00:35, 2.23s/it] Arguments: ('task(5sosf4qb01nxeoo)', 'beautiful girl', '', [], 20, 0, False, False, 1, 1, 7, -1.0, -1.0, 0, 0, 0, False, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 0, '', '', [], 0, False, False, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, 'LoRA', 'None', 1, 1, None, 'Refresh models', <controlnet.py.UiControlNetUnit object at 0x000002B7B0B5D690>, False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, None, None, False, 50) {} Traceback (most recent call last): File "E:\stable diffusion\stable-diffusion-webui-directml\modules\call_queue.py", line 57, in f res = list(func(*args, **kwargs)) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\call_queue.py", line 37, in f res = func(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\txt2img.py", line 57, in txt2img processed = processing.process_images(p) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 611, in process_images res = process_images_inner(p) File "E:\stable diffusion\stable-diffusion-webui-directml\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 729, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 977, in sample samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) File "E:\stable 
diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 383, in sample samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 257, in launch_sampling return func() File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 383, in <lambda> samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={ File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral denoised = model(x, sigmas[i] * s_in, **extra_args) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_samplers_kdiffusion.py", line 137, in forward x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict([cond_in], image_cond_in)) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps return self.inner_model.apply_model(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_utils.py", line 17, in <lambda> setattr(resolved_obj, func_path[-1], lambda *args, 
**kwargs: self(*args, **kwargs)) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_utils.py", line 28, in __call__ return self.__orig_func(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model x_recon = self.model(x_noisy, t, **cond) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward out = self.diffusion_model(x, t, context=cc) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 802, in forward h = module(h, emb, context) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 84, in forward x = layer(x, context) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 334, in forward x = block(x, context=context[i]) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, 
**kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 269, in forward return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 121, in checkpoint return CheckpointFunction.apply(func, len(inputs), *args) File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\autograd\function.py", line 506, in apply return super().apply(*args, **kwargs) # type: ignore[misc] File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 136, in forward output_tensors = ctx.run_function(*ctx.input_tensors) File "E:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 272, in _forward x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x File "E:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 365, in split_cross_attention_forward_invokeAI r = einsum_op(q, k, v) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 340, in einsum_op return einsum_op_dml(q, k, v) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 328, in einsum_op_dml return einsum_op_tensor_mem(q, k, v, (mem_reserved - mem_active) if mem_reserved > mem_active else 1) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 313, in einsum_op_tensor_mem return einsum_op_slice_1(q, k, v, max(q.shape[1] // 
div, 1)) File "E:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_optimizations.py", line 288, in einsum_op_slice_1 r[:, i:end] = einsum_op_compvis(q[:, i:end], k, v) RuntimeError: Could not allocate tensor with 4915840 bytes. There is not enough GPU video memory available!
Beta Was this translation helpful? Give feedback.
All reactions