
Commit 8508df2

Work around black image bug on Mac 14.5 by forcing attention upcasting.
1 parent: 83d969e

File tree: 2 files changed, +16 −2 lines

comfy/ldm/modules/attention.py

+3 −2

@@ -19,12 +19,13 @@
 import comfy.ops
 ops = comfy.ops.disable_weight_init
 
+FORCE_UPCAST_ATTENTION_DTYPE = model_management.force_upcast_attention_dtype()
 
 def get_attn_precision(attn_precision):
     if args.dont_upcast_attention:
         return None
-    if attn_precision is None and args.force_upcast_attention:
-        return torch.float32
+    if FORCE_UPCAST_ATTENTION_DTYPE is not None:
+        return FORCE_UPCAST_ATTENTION_DTYPE
     return attn_precision
 
 def exists(val):
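For context on what the upcast does downstream: the dtype returned by get_attn_precision() is used to cast the query/key tensors before the attention softmax, since computing the softmax logits in fp16 is what can overflow and produce black images. A minimal sketch of that pattern (the function name and tensor shapes below are hypothetical, not part of this commit):

import torch

def attention_scores(q, k, attn_precision=None):
    # Cast to the upcast dtype (e.g. torch.float32) before the matmul;
    # fp16 logits here are what can overflow and yield black images.
    if attn_precision is not None:
        q, k = q.to(attn_precision), k.to(attn_precision)
    scale = q.shape[-1] ** -0.5
    sim = (q @ k.transpose(-2, -1)) * scale
    return sim.softmax(dim=-1)

# Hypothetical fp16 tensors: (batch, heads, tokens, head_dim)
q = torch.randn(1, 8, 64, 40, dtype=torch.float16)
k = torch.randn(1, 8, 64, 40, dtype=torch.float16)
probs = attention_scores(q, k, attn_precision=torch.float32)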

comfy/model_management.py

+13 −0

@@ -5,6 +5,7 @@
 import comfy.utils
 import torch
 import sys
+import platform
 
 class VRAMState(Enum):
     DISABLED = 0 #No vram present: no need to move models to vram
@@ -685,6 +686,18 @@ def pytorch_attention_flash_attention():
         return True
     return False
 
+def force_upcast_attention_dtype():
+    upcast = args.force_upcast_attention
+    try:
+        if platform.mac_ver()[0] in ['14.5']: #black image bug on OSX Sonoma 14.5
+            upcast = True
+    except:
+        pass
+    if upcast:
+        return torch.float32
+    else:
+        return None
+
 def get_free_memory(dev=None, torch_free_too=False):
     global directml_enabled
     if dev is None:
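The new check keys on platform.mac_ver(), which returns a tuple whose first element is the macOS release string ('' on non-Mac systems, so the try/except is mostly defensive). A small sketch of that behavior (the print strings are illustrative only):

import platform

release = platform.mac_ver()[0]  # e.g. '14.5' on macOS Sonoma 14.5, '' on other platforms
if release in ['14.5']:
    print("macOS 14.5 detected: attention will be upcast to torch.float32")
else:
    print(f"mac_ver() reports {release!r}: no forced upcast from this check")

Note the exact-match comparison: a point release such as '14.5.1' would fall outside the list and not trigger the workaround.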
