mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
fix bugs

Signed-off-by: zhaohu xing <920232796@qq.com>

parent 9c86fb8cac, commit 4929503258
3 changed files with 70 additions and 3 deletions
@@ -38,8 +38,8 @@ def get_optimal_device():
     if torch.cuda.is_available():
         return torch.device(get_cuda_device_string())

-    # if has_mps():
-    #     return torch.device("mps")
+    if has_mps():
+        return torch.device("mps")

     return cpu
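This hunk restores the Apple MPS path that had been commented out: pick CUDA first, then MPS, then fall back to CPU. Below is a minimal stand-alone sketch of that selection order; the has_mps() body is an assumption modeled on the helper this module calls, and the real code returns torch.device(get_cuda_device_string()) rather than a bare "cuda" device.

import torch

def has_mps() -> bool:
    # Assumption: approximates the has_mps() helper the diff calls; the
    # getattr guard covers torch builds that lack the mps backend entirely.
    mps_backend = getattr(torch.backends, "mps", None)
    return mps_backend is not None and mps_backend.is_available()

def get_optimal_device() -> torch.device:
    # Same priority order the hunk restores: CUDA, then Apple MPS, then CPU.
    if torch.cuda.is_available():
        return torch.device("cuda")  # webui builds this via get_cuda_device_string()
    if has_mps():
        return torch.device("mps")
    return torch.device("cpu")

print(get_optimal_device())  # e.g. device(type='mps') on Apple Silicon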
@@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
 # new memory efficient cross attention blocks do not support hypernets and we already
 # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
 ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
-# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
+ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention

 # silence new console spam from SD2
 ldm.modules.attention.print = lambda *args: None
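This hunk uncomments the second half of the SD2 attention hijack. Rebinding ldm.modules.attention.MemoryEfficientCrossAttention alone is not enough, because BasicTransformerBlock.ATTENTION_MODES is a dict that already holds a reference to the original class, so the table entry has to be overwritten as well. A minimal sketch of that pattern, with hypothetical stand-in classes in place of the real ldm ones:

class CrossAttention:
    kind = "plain softmax attention"

class MemoryEfficientCrossAttention:
    kind = "xformers memory-efficient attention"

class BasicTransformerBlock:
    # SD2 looks up its attention implementation in a mode table like this,
    # so the dict captures the class object at definition time.
    ATTENTION_MODES = {
        "softmax": CrossAttention,
        "softmax-xformers": MemoryEfficientCrossAttention,
    }

# The hijack rebinds both the module-level name and the mode-table entry,
# so every construction path ends up instantiating plain CrossAttention.
MemoryEfficientCrossAttention = CrossAttention
BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = CrossAttention

assert BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] is CrossAttention

The last line of the hunk relies on the same idea: assigning ldm.modules.attention.print = lambda *args: None creates a module-level name that shadows the builtin print inside that module, which is what silences SD2's console output.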