Unverified Commit daf9d0f1 authored by Sayak Paul, committed by GitHub

[chore] remove prints from tests. (#10505)

remove prints from tests.
parent 95c5ce4e
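
For context, a minimal sketch of the pattern this commit cleans up. It is illustrative only: the test name is hypothetical and the slice values are copied from one of the hunks below, not from a specific file in the repository. A leftover debug `print` before a tolerance-based assertion is dropped; when the check fails, the assertion message already reports the actual values.

```python
# Illustrative stand-in test showing the before/after pattern of this commit,
# not an actual test file from the repository.
import numpy as np


def test_output_slice_matches_expected():
    # stand-in for a pipeline output slice produced earlier in the test
    image_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])
    expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])

    # Before this commit, a debug line such as
    #     print(image_slice.flatten())
    # preceded the check. It is removed; the assertion message alone
    # surfaces the actual values when the comparison fails.
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, (
        f"output is different from expected, {image_slice.flatten()}"
    )
```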
@@ -132,7 +132,6 @@ class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
output = model(**input)[0]
output_slice = output[0, :5].flatten().cpu()
print(output_slice)
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
@@ -182,7 +181,6 @@ class PriorTransformerIntegrationTests(unittest.TestCase):
assert list(sample.shape) == [1, 768]
output_slice = sample[0, :8].flatten().cpu()
print(output_slice)
expected_output_slice = torch.tensor(expected_slice)
assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@@ -175,8 +175,7 @@ def create_ip_adapter_plus_state_dict(model):
)
ip_image_projection_state_dict = OrderedDict()
keys = [k for k in image_projection.state_dict() if "layers." in k]
print(keys)
for k, v in image_projection.state_dict().items():
if "2.to" in k:
k = k.replace("2.to", "0.to")
@@ -78,7 +78,7 @@ class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
expected_slice = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
)
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def test_pose(self):
@@ -123,5 +123,5 @@ class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
expected_slice = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
)
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@@ -308,8 +308,6 @@ class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(image_from_tuple_slice)
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.0320, 0.0860, 0.4013, 0.0518, 0.2484, 0.5847, 0.4411, 0.2321, 0.4593])
@@ -146,7 +146,7 @@ class LEditsPPPipelineStableDiffusionFastTests(unittest.TestCase):
)
latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device)
print(latent_slice.flatten())
expected_slice = np.array([-0.9084, -0.0367, 0.2940, 0.0839, 0.6890, 0.2651, -0.7104, 2.1090, -0.7822])
assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
@@ -167,12 +167,12 @@ class LEditsPPPipelineStableDiffusionFastTests(unittest.TestCase):
)
latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device)
print(latent_slice.flatten())
expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5657, -1.0286, -0.9961, 0.5933, 1.1173])
assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device)
print(latent_slice.flatten())
expected_slice = np.array([-0.0796, 2.0583, 0.5501, 0.5358, 0.0282, -0.2803, -1.0470, 0.7023, -0.0072])
assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
@@ -216,14 +216,14 @@ class LEditsPPPipelineStableDiffusionXLFastTests(unittest.TestCase):
)
latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device)
print(latent_slice.flatten())
expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5656, -1.0286, -0.9961, 0.5933, 1.1172])
assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device)
print(latent_slice.flatten())
expected_slice = np.array([-0.0796, 2.0583, 0.5500, 0.5358, 0.0282, -0.2803, -1.0470, 0.7024, -0.0072])
print(latent_slice.flatten())
assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3
def test_ledits_pp_warmup_steps(self):
@@ -318,7 +318,7 @@ class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase):
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
print(image_slice.flatten())
expected_slice = np.array(
[0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484]
)
@@ -339,7 +339,6 @@ class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase):
expected_slice = np.array(
[0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867]
)
print(image_slice.flatten())
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
), f"output is different from expected, {image_slice.flatten()}"
@@ -255,7 +255,7 @@ class StableDiffusionPAGImg2ImgPipelineIntegrationTests(unittest.TestCase):
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
print(image_slice.flatten())
expected_slice = np.array(
[0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484]
)
@@ -276,7 +276,7 @@ class StableDiffusionPAGImg2ImgPipelineIntegrationTests(unittest.TestCase):
expected_slice = np.array(
[0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867]
)
print(image_slice.flatten())
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
), f"output is different from expected, {image_slice.flatten()}"
@@ -292,7 +292,7 @@ class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase):
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
print(image_slice.flatten())
expected_slice = np.array(
[0.38793945, 0.4111328, 0.47924805, 0.39208984, 0.4165039, 0.41674805, 0.37060547, 0.36791992, 0.40625]
)
@@ -206,9 +206,6 @@ class StableDiffusionInstructPix2PixPipelineFastTests(
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
slice = [round(x, 4) for x in image_slice.flatten().tolist()]
print(",".join([str(x) for x in slice]))
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
@@ -62,7 +62,7 @@ class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@@ -104,5 +104,5 @@ class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@@ -78,5 +78,5 @@ class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
expected_slice = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
)
print(f"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@@ -642,9 +642,6 @@ class StableDiffusionXLMultiAdapterPipelineFastTests(
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])
debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()]
print(",".join(debug))
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_adapter_sdxl_lcm_custom_timesteps(self):
@@ -667,7 +664,4 @@ class StableDiffusionXLMultiAdapterPipelineFastTests(
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448])
debug = [str(round(i, 4)) for i in image_slice.flatten().tolist()]
print(",".join(debug))
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@@ -1192,7 +1192,6 @@ class PipelineTesterMixin:
logger.setLevel(level=diffusers.logging.WARNING)
for batch_size, batched_input in zip(batch_sizes, batched_inputs):
print(batch_size, batched_input)
output = pipe(**batched_input)
assert len(output[0]) == batch_size
@@ -103,8 +103,6 @@ class SASolverSchedulerTest(SchedulerCommonTest):
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 329.1999816894531) < 1e-2
assert abs(result_mean.item() - 0.4286458194255829) < 1e-3
else:
print("None")
def test_full_loop_with_v_prediction(self):
scheduler_class = self.scheduler_classes[0]
@@ -135,8 +133,6 @@ class SASolverSchedulerTest(SchedulerCommonTest):
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 193.4154052734375) < 1e-2
assert abs(result_mean.item() - 0.2518429756164551) < 1e-3
else:
print("None")
def test_full_loop_device(self):
scheduler_class = self.scheduler_classes[0]
@@ -166,8 +162,6 @@ class SASolverSchedulerTest(SchedulerCommonTest):
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 337.394287109375) < 1e-2
assert abs(result_mean.item() - 0.4393154978752136) < 1e-3
else:
print("None")
def test_full_loop_device_karras_sigmas(self):
scheduler_class = self.scheduler_classes[0]
@@ -198,8 +192,6 @@ class SASolverSchedulerTest(SchedulerCommonTest):
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 837.25537109375) < 1e-2
assert abs(result_mean.item() - 1.0901763439178467) < 1e-2
else:
print("None")
def test_beta_sigmas(self):
self.check_over_configs(use_beta_sigmas=True)