Expected weight to be a vector of size equal to the number of channels in input, but got weight of shape [1280] and input of shape [16, 2560, 9, 9]
I have mm_sd_v15.ckpt downloaded to stable-diffusion-webui\extensions\sd-webui-animatediff\model.
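For what it's worth, the RuntimeError itself is a plain GroupNorm channel mismatch: the layer's affine weight is sized for 1280 channels, but it receives a 2560-channel tensor. A minimal PyTorch sketch (illustration only, not the extension's actual code) reproduces the same message:

```python
import torch
import torch.nn as nn

# GroupNorm's affine weight has one entry per channel (1280 here), so feeding a
# tensor whose channel dimension is 2560 raises the exact error from the log.
norm = nn.GroupNorm(num_groups=32, num_channels=1280)
x = torch.randn(4, 2560, 8, 8)  # same shape as the failing input in the traceback

try:
    norm(x)
except RuntimeError as e:
    print(e)  # "Expected weight to be a vector of size equal to the number of channels in input..."
```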
Complete log:
2023-07-18 11:19:21,548 - AnimateDiff - INFO - AnimateDiff process start with video length 2, FPS 8, motion module mm_sd_v15.ckpt.
2023-07-18 11:19:21,551 - AnimateDiff - INFO - Injecting motion module mm_sd_v15.ckpt into SD1.5 UNet input blocks.
2023-07-18 11:19:21,552 - AnimateDiff - INFO - Injecting motion module mm_sd_v15.ckpt into SD1.5 UNet output blocks.
2023-07-18 11:19:21,552 - AnimateDiff - INFO - Injection finished.
0%| | 0/30 [00:00<?, ?it/s]
*** Error completing request
*** Arguments: ('task(ba8qq6a5sxydhr6)', 'Beautiful Scenery', '', [], 30, 16, False, False, 1, 2, 7.5, -1.0, -1.0, 0, 0, 0, False, 512, 512, False, 0.33, 1.5, '4x_UniversalUpscalerV2-Sharper_103000_G', 10, 0, 0, 19, '', '', [], <gradio.routes.Request object at 0x000002E7E74F5420>, 0, 0, False, 'Horizontal', '1,1', False, '0.2', False, False, 'female', True, 1, True, -1.0, [], [], [], [], False, {'ad_model': 'face_yolov8n.pt', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': <object object at 0x000002E7E6FB5740>}, {'ad_model': 'None', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'inpaint_global_harmonious', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': <object object at 0x000002E7E6FB5720>}, False, 7, 100, 'Constant', 0, 'Constant', 0, 4, True, 2, 8, 'mm_sd_v15.ckpt', <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x000002E7E5CB6B60>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x000002E7E5C86980>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x000002E7E750FE20>, <scripts.controlnet_ui.controlnet_ui_group.UiControlNetUnit object at 0x000002E7E750CA90>, None, False, '0', 'G:\stablediffusion\stable-diffusion-webui\extensions/sd-webui-faceswap/models\inswapper_128.onnx', 'CodeFormer', 1, '', 1, 1, False, True, False, False, 0, None, [], 0, False, [], [], False, 0, 1, False, False, 0, None, [], -2, False, [], False, 0, None, None, False, False, 0, 1, 1, 0, 0, 0, 0, False, 'Default', False, False, 'Euler a', 0.95, 0.75, 'zero', 'pos', 'linear', 0.2, 0.0, 0.75, None, 'Lanczos', 1, 0, 0, True, 0.3, 'Latent', 0.55, 0.3, 0.2, 0.2, [], False, 1.5, 1.2, False, '', '1', 'from modules.processing import process_images\n\np.width = 768\np.height = 768\np.batch_size = 2\np.steps = 10\n\nreturn process_images(p)', 2, 0, 0, 384, 384, False, False, True, True, True, 1, '', '', 8, True, 16, 'Median cut', False, None, None, '', '', '', '', 'Auto rename', {'label': 'Upload avatars config'}, 'Open outputs directory', 'Export to WebUI style', True, {'label': 'Presets'}, {'label': 'QC preview'}, '', [], 'Select', 'QC scan', 'Show pics', None, False, False, 'positive', 'comma', 0, False, False, 
'', 'Positive', 0, ', ', True, 32, 0, 'Median cut', 'luminance', False, 'Illustration', 'svg', True, True, False, 0.5, True, 16, True, 16, 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, 'Not set', True, True, '', '', '', '', '', 1.3, 'Not set', 'Not set', 1.3, 'Not set', 1.3, 'Not set', 1.3, 1.1, 'Not set', 1.1, 'Not set', 1.1, 'Not set', 1.1, 'Not set', 1.1, 'Not set', 1.1, 'Not set', False, 'None', 'Not set', True, False, '', '', '', '', '', 1.3, 'Not set', 'Not set', 'Not set', 1, 1.3, 'Not set', 'Not set', 'Not set', 'Not set', 'Not set', 'Not set', 1.3, 1.3, 1.3, 'Not set', 'Not set', 1.3, True, True, 'Disabled', None, None, False, None, None, False, None, None, False, None, None, False, 50) {}
Traceback (most recent call last):
File "G:\stablediffusion\stable-diffusion-webui\modules\call_queue.py", line 58, in f
res = list(func(*args, **kwargs))
File "G:\stablediffusion\stable-diffusion-webui\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\modules\txt2img.py", line 62, in txt2img
processed = processing.process_images(p)
File "G:\stablediffusion\stable-diffusion-webui\modules\processing.py", line 639, in process_images
res = process_images_inner(p)
File "G:\stablediffusion\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\modules\processing.py", line 759, in process_images_inner
samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
File "G:\stablediffusion\stable-diffusion-webui\modules\processing.py", line 1012, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "G:\stablediffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 464, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "G:\stablediffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 303, in launch_sampling
return func()
File "G:\stablediffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 464, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\modules\sd_samplers_kdiffusion.py", line 183, in forward
x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "G:\stablediffusion\stable-diffusion-webui\modules\sd_hijack_utils.py", line 28, in call
return self.__orig_func(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1335, in forward
out = self.diffusion_model(x, t, context=cc)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\modules\sd_unet.py", line 91, in UNetModel_forward
return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 802, in forward
h = module(h, emb, context)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\extensions\sd-webui-animatediff\scripts\animatediff.py", line 21, in mm_tes_forward
x = layer(x, context)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\extensions\sd-webui-animatediff\motion_module.py", line 79, in forward
hidden_states = self.temporal_transformer(hidden_states, encoder_hidden_states, attention_mask)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\extensions\sd-webui-animatediff\motion_module.py", line 144, in forward
hidden_states = self.norm(hidden_states)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\normalization.py", line 273, in forward
return F.group_norm(
File "G:\stablediffusion\stable-diffusion-webui\venv\lib\site-packages\torch\nn\functional.py", line 2530, in group_norm
return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled)
RuntimeError: Expected weight to be a vector of size equal to the number of channels in input, but got weight of shape [1280] and input of shape [4, 2560, 8, 8]