Unverified Commit 9ff72433 authored by fancy45daddy's avatar fancy45daddy Committed by GitHub
Browse files

add torch_xla support in pipeline_stable_audio.py (#10109)

Update pipeline_stable_audio.py
parent c1926cef
......@@ -26,6 +26,7 @@ from ...models import AutoencoderOobleck, StableAudioDiTModel
from ...models.embeddings import get_1d_rotary_pos_embed
from ...schedulers import EDMDPMSolverMultistepScheduler
from ...utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
......@@ -33,6 +34,12 @@ from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .modeling_stable_audio import StableAudioProjectionModel
# Detect whether a torch_xla (TPU) runtime is present. The flag is consulted
# later in the denoising loop to call xm.mark_step() after each step, which
# tells the XLA compiler to cut the lazy-execution graph there.
XLA_AVAILABLE = is_torch_xla_available()
if XLA_AVAILABLE:
    import torch_xla.core.xla_model as xm  # noqa: F401 -- used in the denoising loop

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......@@ -726,6 +733,9 @@ class StableAudioPipeline(DiffusionPipeline):
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
# 9. Post-processing
if not output_type == "latent":
audio = self.vae.decode(latents).sample
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment