commit 4929503258
parent 9c86fb8cac
Author: zhaohu xing <920232796@qq.com>
Date:   2022-12-06 09:03:55 +08:00
Signed-off-by: zhaohu xing <920232796@qq.com>

3 changed files with 70 additions and 3 deletions


@@ -38,8 +38,8 @@ def get_optimal_device():
     if torch.cuda.is_available():
         return torch.device(get_cuda_device_string())
-    # if has_mps():
-    #     return torch.device("mps")
+    if has_mps():
+        return torch.device("mps")
     return cpu
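
For context, has_mps() is defined elsewhere in the module; below is a minimal sketch of the check this change relies on, assuming the torch.backends.mps probe available in PyTorch 1.12+ (the real helper may differ):

import torch

def has_mps() -> bool:
    # guard with getattr: older PyTorch builds have no mps backend at all
    mps = getattr(torch.backends, "mps", None)
    return mps is not None and mps.is_available()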


@@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
 # new memory efficient cross attention blocks do not support hypernets and we already
 # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention
 ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention
-# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
+ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention
 # silence new console spam from SD2
 ldm.modules.attention.print = lambda *args: None
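
Both hijacks in this hunk rely on the same monkey-patching pattern: rebinding attributes in an imported module's namespace before any model is constructed, so later lookups (including bare print() calls inside that module, which resolve through the module's globals) see the replacement. A self-contained sketch of the pattern using a hypothetical stand-in module, not webui's code:

import types

# hypothetical module standing in for ldm.modules.attention
fakelib = types.ModuleType("fakelib")

class CrossAttention: ...
class MemoryEfficientCrossAttention: ...
fakelib.CrossAttention = CrossAttention
fakelib.MemoryEfficientCrossAttention = MemoryEfficientCrossAttention

# swap the efficient class for the plain one; code that later does
# fakelib.MemoryEfficientCrossAttention(...) now builds CrossAttention
fakelib.MemoryEfficientCrossAttention = fakelib.CrossAttention

# shadow print in the module's namespace so the module's own bare
# print() calls resolve to a no-op instead of the builtin
fakelib.print = lambda *args, **kwargs: None

assert fakelib.MemoryEfficientCrossAttention is fakelib.CrossAttention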