1 parent 20447e9 · commit b1fd26f
comfy/model_management.py
@@ -693,6 +693,8 @@ def pytorch_attention_flash_attention():
     #TODO: more reliable way of checking for flash attention?
     if is_nvidia(): #pytorch flash attention only works on Nvidia
         return True
+    if is_intel_xpu():
+        return True
     return False


 def force_upcast_attention_dtype():
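
Note on the change: the commit makes pytorch_attention_flash_attention() report PyTorch attention support on Intel XPU devices in addition to Nvidia GPUs. A minimal sketch of how such a device probe could look is below; is_intel_xpu() here is a hypothetical stand-in written around PyTorch's xpu backend, not ComfyUI's actual helper, which may track the selected device differently.

import torch

def is_intel_xpu():
    # Hypothetical probe: report True when PyTorch exposes an xpu backend
    # and an Intel XPU device is actually available.
    return hasattr(torch, "xpu") and torch.xpu.is_available()

print("Intel XPU available:", is_intel_xpu())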