File tree 2 files changed +16
-2
lines changed
2 files changed +16
-2
lines changed Original file line number Diff line number Diff line change 19
19
import comfy .ops
20
20
ops = comfy .ops .disable_weight_init
21
21
22
+ FORCE_UPCAST_ATTENTION_DTYPE = model_management .force_upcast_attention_dtype ()
22
23
23
24
def get_attn_precision(attn_precision):
    """Resolve the dtype attention math should run in.

    Returns None (no upcast) when the user passed --dont-upcast-attention,
    otherwise the globally forced upcast dtype when one is set at import
    time, falling back to the caller-supplied precision.
    """
    if args.dont_upcast_attention:
        return None
    forced = FORCE_UPCAST_ATTENTION_DTYPE
    # A forced dtype (CLI flag or platform workaround) overrides the request.
    return forced if forced is not None else attn_precision
29
30
30
31
def exists (val ):
Original file line number Diff line number Diff line change 5
5
import comfy .utils
6
6
import torch
7
7
import sys
8
+ import platform
8
9
9
10
class VRAMState (Enum ):
10
11
DISABLED = 0 #No vram present: no need to move models to vram
@@ -685,6 +686,18 @@ def pytorch_attention_flash_attention():
685
686
return True
686
687
return False
687
688
689
def force_upcast_attention_dtype():
    """Return the dtype attention should be force-upcast to, or None.

    Upcasting to float32 is forced either by the --force-upcast-attention
    CLI flag or automatically on macOS Sonoma 14.5, which has a known
    black-image bug when attention runs in reduced precision.

    Returns:
        torch.float32 when upcasting is forced, otherwise None.
    """
    upcast = args.force_upcast_attention
    try:
        # Black image bug on OSX Sonoma 14.5 — force fp32 attention there.
        if platform.mac_ver()[0] == '14.5':
            upcast = True
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; version detection failing must never break startup,
        # but control-flow exceptions should propagate.
        pass
    return torch.float32 if upcast else None
688
701
def get_free_memory (dev = None , torch_free_too = False ):
689
702
global directml_enabled
690
703
if dev is None :
You can’t perform that action at this time.
0 commit comments