Unverified Commit 73bf620d authored by Kashif Rasul's avatar Kashif Rasul Committed by GitHub
Browse files

fix E721 Do not compare types, use `isinstance()` (#4992)

parent c806f2fa
...@@ -1138,7 +1138,7 @@ class SDXLLongPromptWeightingPipeline(DiffusionPipeline, FromSingleFileMixin, Lo ...@@ -1138,7 +1138,7 @@ class SDXLLongPromptWeightingPipeline(DiffusionPipeline, FromSingleFileMixin, Lo
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 7.1 Apply denoising_end # 7.1 Apply denoising_end
if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
discrete_timestep_cutoff = int( discrete_timestep_cutoff = int(
round( round(
self.scheduler.config.num_train_timesteps self.scheduler.config.num_train_timesteps
......
...@@ -701,7 +701,7 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline): ...@@ -701,7 +701,7 @@ class StableDiffusionXLReferencePipeline(StableDiffusionXLPipeline):
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 10.1 Apply denoising_end # 10.1 Apply denoising_end
if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
discrete_timestep_cutoff = int( discrete_timestep_cutoff = int(
round( round(
self.scheduler.config.num_train_timesteps self.scheduler.config.num_train_timesteps
......
...@@ -76,7 +76,7 @@ class ValueGuidedRLPipeline(DiffusionPipeline): ...@@ -76,7 +76,7 @@ class ValueGuidedRLPipeline(DiffusionPipeline):
return x_in * self.stds[key] + self.means[key] return x_in * self.stds[key] + self.means[key]
def to_torch(self, x_in): def to_torch(self, x_in):
if type(x_in) is dict: if isinstance(x_in, dict):
return {k: self.to_torch(v) for k, v in x_in.items()} return {k: self.to_torch(v) for k, v in x_in.items()}
elif torch.is_tensor(x_in): elif torch.is_tensor(x_in):
return x_in.to(self.unet.device) return x_in.to(self.unet.device)
......
...@@ -178,7 +178,7 @@ class AudioDiffusionPipeline(DiffusionPipeline): ...@@ -178,7 +178,7 @@ class AudioDiffusionPipeline(DiffusionPipeline):
self.scheduler.set_timesteps(steps) self.scheduler.set_timesteps(steps)
step_generator = step_generator or generator step_generator = step_generator or generator
# For backwards compatibility # For backwards compatibility
if type(self.unet.config.sample_size) == int: if isinstance(self.unet.config.sample_size, int):
self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size) self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None: if noise is None:
noise = randn_tensor( noise = randn_tensor(
......
...@@ -810,7 +810,7 @@ class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoad ...@@ -810,7 +810,7 @@ class StableDiffusionXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoad
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 7.1 Apply denoising_end # 7.1 Apply denoising_end
if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
discrete_timestep_cutoff = int( discrete_timestep_cutoff = int(
round( round(
self.scheduler.config.num_train_timesteps self.scheduler.config.num_train_timesteps
......
...@@ -885,7 +885,7 @@ class StableDiffusionXLImg2ImgPipeline( ...@@ -885,7 +885,7 @@ class StableDiffusionXLImg2ImgPipeline(
# 5. Prepare timesteps # 5. Prepare timesteps
def denoising_value_valid(dnv): def denoising_value_valid(dnv):
return type(denoising_end) == float and 0 < dnv < 1 return isinstance(denoising_end, float) and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps( timesteps, num_inference_steps = self.get_timesteps(
......
...@@ -1120,7 +1120,7 @@ class StableDiffusionXLInpaintPipeline( ...@@ -1120,7 +1120,7 @@ class StableDiffusionXLInpaintPipeline(
# 4. set timesteps # 4. set timesteps
def denoising_value_valid(dnv): def denoising_value_valid(dnv):
return type(denoising_end) == float and 0 < dnv < 1 return isinstance(denoising_end, float) and 0 < dnv < 1
self.scheduler.set_timesteps(num_inference_steps, device=device) self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, num_inference_steps = self.get_timesteps( timesteps, num_inference_steps = self.get_timesteps(
......
...@@ -837,7 +837,7 @@ class StableDiffusionXLInstructPix2PixPipeline( ...@@ -837,7 +837,7 @@ class StableDiffusionXLInstructPix2PixPipeline(
# 11. Denoising loop # 11. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
discrete_timestep_cutoff = int( discrete_timestep_cutoff = int(
round( round(
self.scheduler.config.num_train_timesteps self.scheduler.config.num_train_timesteps
......
...@@ -886,7 +886,7 @@ class StableDiffusionXLAdapterPipeline( ...@@ -886,7 +886,7 @@ class StableDiffusionXLAdapterPipeline(
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
# 7.1 Apply denoising_end # 7.1 Apply denoising_end
if denoising_end is not None and type(denoising_end) == float and denoising_end > 0 and denoising_end < 1: if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
discrete_timestep_cutoff = int( discrete_timestep_cutoff = int(
round( round(
self.scheduler.config.num_train_timesteps self.scheduler.config.num_train_timesteps
......
...@@ -193,7 +193,7 @@ class ConsistencyModelPipelineSlowTests(unittest.TestCase): ...@@ -193,7 +193,7 @@ class ConsistencyModelPipelineSlowTests(unittest.TestCase):
return inputs return inputs
def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)): def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
if type(device) == str: if isinstance(device, str):
device = torch.device(device) device = torch.device(device)
generator = torch.Generator(device=device).manual_seed(seed) generator = torch.Generator(device=device).manual_seed(seed)
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
......
...@@ -109,7 +109,7 @@ class UniDiffuserPipelineFastTests(PipelineTesterMixin, unittest.TestCase): ...@@ -109,7 +109,7 @@ class UniDiffuserPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
return inputs return inputs
def get_fixed_latents(self, device, seed=0): def get_fixed_latents(self, device, seed=0):
if type(device) == str: if isinstance(device, str):
device = torch.device(device) device = torch.device(device)
generator = torch.Generator(device=device).manual_seed(seed) generator = torch.Generator(device=device).manual_seed(seed)
# Hardcode the shapes for now. # Hardcode the shapes for now.
...@@ -545,7 +545,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase): ...@@ -545,7 +545,7 @@ class UniDiffuserPipelineSlowTests(unittest.TestCase):
return inputs return inputs
def get_fixed_latents(self, device, seed=0): def get_fixed_latents(self, device, seed=0):
if type(device) == str: if isinstance(device, str):
device = torch.device(device) device = torch.device(device)
latent_device = torch.device("cpu") latent_device = torch.device("cpu")
generator = torch.Generator(device=latent_device).manual_seed(seed) generator = torch.Generator(device=latent_device).manual_seed(seed)
...@@ -648,7 +648,7 @@ class UniDiffuserPipelineNightlyTests(unittest.TestCase): ...@@ -648,7 +648,7 @@ class UniDiffuserPipelineNightlyTests(unittest.TestCase):
return inputs return inputs
def get_fixed_latents(self, device, seed=0): def get_fixed_latents(self, device, seed=0):
if type(device) == str: if isinstance(device, str):
device = torch.device(device) device = torch.device(device)
latent_device = torch.device("cpu") latent_device = torch.device("cpu")
generator = torch.Generator(device=latent_device).manual_seed(seed) generator = torch.Generator(device=latent_device).manual_seed(seed)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment