I'm trying to update my extension from the old version to the SFW one in A1111, but when I try to use it, I get this error:
18:55:35 - ReActor - STATUS - Checking for any unsafe content
*** Error running postprocess_image: C:\AI_GRAPHICS\stable-diffusion-webui\extensions\sd-webui-reactor-sfw\scripts\reactor_faceswap.py
Traceback (most recent call last):
File "C:\AI_GRAPHICS\stable-diffusion-webui\modules\scripts.py", line 912, in postprocess_image
script.postprocess_image(p, pp, *script_args)
File "C:\AI_GRAPHICS\stable-diffusion-webui\extensions\sd-webui-reactor-sfw\scripts\reactor_faceswap.py", line 465, in postprocess_image
result, output, swapped = swap_face(
File "C:\AI_GRAPHICS\stable-diffusion-webui\extensions\sd-webui-reactor-sfw\scripts\reactor_swapper.py", line 391, in swap_face
if check_sfw_image(result_image) is None:
File "C:\AI_GRAPHICS\stable-diffusion-webui\extensions\sd-webui-reactor-sfw\scripts\reactor_swapper.py", line 359, in check_sfw_image
if not sfw.nsfw_image(tmp_img, NSFWDET_MODEL_PATH):
File "C:\AI_GRAPHICS\stable-diffusion-webui\extensions\sd-webui-reactor-sfw\scripts\reactor_sfw.py", line 15, in nsfw_image
result = predict(img)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\pipelines\image_classification.py", line 100, in call
return super().call(images, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\pipelines\base.py", line 1120, in call
return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\pipelines\base.py", line 1127, in run_single
model_outputs = self.forward(model_inputs, **forward_params)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\pipelines\base.py", line 1026, in forward
model_outputs = self._forward(model_inputs, **forward_params)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\pipelines\image_classification.py", line 108, in _forward
model_outputs = self.model(**model_inputs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\models\vit\modeling_vit.py", line 804, in forward
outputs = self.vit(
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\models\vit\modeling_vit.py", line 583, in forward
embedding_output = self.embeddings(
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\models\vit\modeling_vit.py", line 122, in forward
embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\transformers\models\vit\modeling_vit.py", line 181, in forward
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\AI_GRAPHICS\stable-diffusion-webui\extensions-builtin\Lora\networks.py", line 599, in network_Conv2d_forward
return originals.Conv2d_forward(self, input)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 460, in forward return self._conv_forward(input, self.weight, self.bias)
File "C:\AI_GRAPHICS\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\conv.py", line 456, in _conv_forward
return F.conv2d(input, weight, bias, self.stride,
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same or input should be a MKLDNN tensor and weight is a dense tensor
What should I do?
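The RuntimeError at the end of the traceback means the NSFW classifier's weights are on the GPU (torch.cuda.FloatTensor) while the image tensor the pipeline feeds it is still on the CPU (torch.FloatTensor). Below is a minimal sketch, not the extension's actual code, of how that mismatch arises with a transformers image-classification pipeline and one way to keep input and weights on the same device. The model id and the blank test image are placeholders; the extension loads its own detector from NSFWDET_MODEL_PATH, which the traceback does not show.

```python
# Minimal sketch of the device mismatch behind the RuntimeError above.
# Placeholder model id / image; not taken from the ReActor SFW extension.
import torch
from PIL import Image
from transformers import pipeline

MODEL_ID = "Falconsai/nsfw_image_detection"  # assumption: any ViT image classifier

# Pipeline built on the CPU: it preprocesses images into CPU tensors.
predict = pipeline("image-classification", model=MODEL_ID, device=-1)

if torch.cuda.is_available():
    # If something later moves the weights to the GPU (A1111 shuffles models
    # between devices), the pipeline keeps producing CPU pixel_values:
    predict.model.to("cuda")
    img = Image.new("RGB", (224, 224))
    # predict(img)  # -> RuntimeError: Input type (torch.FloatTensor) and
    #               #    weight type (torch.cuda.FloatTensor) should be the same

# A consistent setup: build the pipeline on the device the model will run on,
# so inputs are moved there automatically before the forward pass.
device = 0 if torch.cuda.is_available() else -1
predict_ok = pipeline("image-classification", model=MODEL_ID, device=device)
print(predict_ok(Image.new("RGB", (224, 224))))
```

In other words, the classifier's forward pass only works when the model and the preprocessed image live on the same device; whichever component moves one of them has to move the other as well.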