Commit 0f408885 authored by PengGao, committed by GitHub

Refactor cpu_offload checks to ensure the attribute exists before accessing it (#115)

parent f893a269
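
For reference, the guard this commit introduces can also be spelled with getattr and a default. The sketch below is illustrative only; needs_offload, needs_offload_getattr, and the SimpleNamespace stand-ins are not part of the repository, they merely mirror the cpu_offload handling in the diff:

    from types import SimpleNamespace

    def needs_offload(args):
        # Pattern added in this commit: read args.cpu_offload only if the attribute exists.
        return bool(hasattr(args, "cpu_offload") and args.cpu_offload)

    def needs_offload_getattr(args):
        # Equivalent spelling: getattr with a False default avoids the separate hasattr call.
        return bool(getattr(args, "cpu_offload", False))

    old_args = SimpleNamespace()                  # args object without the flag (the case that used to raise AttributeError)
    new_args = SimpleNamespace(cpu_offload=True)  # args object with CPU offloading enabled

    print(needs_offload(old_args))          # False
    print(needs_offload(new_args))          # True
    print(needs_offload_getattr(old_args))  # False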
@@ -434,7 +434,7 @@ class CLIPModel:
         logger.info(f"End Loading weights from {self.checkpoint_path}")
 
     def visual(self, videos, args):
-        if args.cpu_offload:
+        if hasattr(args, "cpu_offload") and args.cpu_offload:
             self.to_cuda()
         # preprocess
         size = (self.model.image_size,) * 2
@@ -445,7 +445,7 @@ class CLIPModel:
         with torch.amp.autocast("cuda", dtype=self.dtype):
             out = self.model.visual(videos, use_31_block=True)
-        if args.cpu_offload:
+        if hasattr(args, "cpu_offload") and args.cpu_offload:
            self.to_cpu()
         return out
@@ -868,7 +868,7 @@ class WanVAE:
         """
        videos: A list of videos each with shape [C, T, H, W].
         """
-        if args.cpu_offload:
+        if hasattr(args, "cpu_offload") and args.cpu_offload:
             self.to_cuda()
 
         if self.use_tiling:
@@ -876,7 +876,7 @@ class WanVAE:
         else:
             out = [self.model.encode(u.unsqueeze(0), self.scale).float().squeeze(0) for u in videos]
-        if args.cpu_offload:
+        if hasattr(args, "cpu_offload") and args.cpu_offload:
             self.to_cpu()
         return out