1 parent 2ff4275 commit c141e0a
src/diffusers/models/attention_processor.py
@@ -26,6 +26,7 @@
 if is_torch_xla_available():
     from torch_xla.experimental.custom_kernel import flash_attention
+    import torch_xla.distributed.spmd as xs
     import torch_xla.runtime as xr
 
     XLA_AVAILABLE = True
 else:
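
For context, here is a minimal sketch of how the newly imported `torch_xla.distributed.spmd` module (aliased `xs`) and the already-present `torch_xla.runtime` module (aliased `xr`) are typically used together. This is an illustration under assumed conditions, not code from this commit: it presumes an XLA/TPU environment, and the mesh shape, tensor shape, and variable names are hypothetical.

```python
# Sketch only: exercising the guarded torch_xla SPMD imports.
import numpy as np
import torch

from diffusers.utils import is_torch_xla_available

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm
    import torch_xla.distributed.spmd as xs
    import torch_xla.runtime as xr

    xr.use_spmd()  # enable SPMD execution mode for the XLA runtime

    # Build a 1-D mesh over every addressable XLA device.
    num_devices = xr.global_runtime_device_count()
    mesh = xs.Mesh(np.arange(num_devices), (num_devices,), ("data",))

    # Shard the batch dimension of a (batch, seq, hidden) tensor across
    # the "data" axis of the mesh; the other dims stay replicated.
    hidden_states = torch.zeros(8, 64, 128, device=xm.xla_device())
    xs.mark_sharding(hidden_states, mesh, ("data", None, None))
```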