Unverified Commit 1f948109 authored by dg845's avatar dg845 Committed by GitHub
Browse files

[docs] Fix DiffusionPipeline.enable_sequential_cpu_offload docstring (#4952)

* Fix an unmatched backtick and make description more general for DiffusionPipeline.enable_sequential_cpu_offload.

* make style

* _exclude_from_cpu_offload -> self._exclude_from_cpu_offload

* make style

* apply suggestions from review

* make style
parent 37cb819d
```diff
@@ -1293,10 +1293,10 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
     def enable_sequential_cpu_offload(self, gpu_id: int = 0, device: Union[torch.device, str] = "cuda"):
         r"""
-        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
-        text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-        `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.
-        Note that offloading happens on a submodule basis. Memory savings are higher than with
+        Offloads all models to CPU using 🤗 Accelerate, significantly reducing memory usage. When called, the state
+        dicts of all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are saved to CPU
+        and then moved to `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward`
+        method called. Offloading happens on a submodule basis. Memory savings are higher than with
         `enable_model_cpu_offload`, but performance is lower.
         """
         if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
```
…
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment