Unverified Commit ee40088f authored by jiqing-feng's avatar jiqing-feng Committed by GitHub
Browse files

enable deterministic in bnb 4 bit tests (#11738)



* enable deterministic in bnb 4 bit tests
Signed-off-by: jiqing-feng <jiqing.feng@intel.com>

* fix 8bit test
Signed-off-by: jiqing-feng <jiqing.feng@intel.com>

---------
Signed-off-by: jiqing-feng <jiqing.feng@intel.com>
parent 7fc53b5d
...@@ -96,6 +96,10 @@ class Base4bitTests(unittest.TestCase): ...@@ -96,6 +96,10 @@ class Base4bitTests(unittest.TestCase):
num_inference_steps = 10 num_inference_steps = 10
seed = 0 seed = 0
@classmethod
def setUpClass(cls):
    """Force deterministic torch kernels so test outputs are reproducible.

    `torch.use_deterministic_algorithms` flips a process-wide flag; without
    cleanup it would leak into every test class that runs afterwards. We
    therefore snapshot the current setting and register a class-level
    cleanup (runs after tearDownClass) that restores it.
    """
    prev = torch.are_deterministic_algorithms_enabled()
    cls.addClassCleanup(torch.use_deterministic_algorithms, prev)
    torch.use_deterministic_algorithms(True)
def get_dummy_inputs(self): def get_dummy_inputs(self):
prompt_embeds = load_pt( prompt_embeds = load_pt(
"https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt", "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt",
...@@ -480,7 +484,6 @@ class SlowBnb4BitTests(Base4bitTests): ...@@ -480,7 +484,6 @@ class SlowBnb4BitTests(Base4bitTests):
r""" r"""
Test that loading the model and unquantize it produce correct results. Test that loading the model and unquantize it produce correct results.
""" """
torch.use_deterministic_algorithms(True)
self.pipeline_4bit.transformer.dequantize() self.pipeline_4bit.transformer.dequantize()
output = self.pipeline_4bit( output = self.pipeline_4bit(
prompt=self.prompt, prompt=self.prompt,
......
...@@ -97,6 +97,10 @@ class Base8bitTests(unittest.TestCase): ...@@ -97,6 +97,10 @@ class Base8bitTests(unittest.TestCase):
num_inference_steps = 10 num_inference_steps = 10
seed = 0 seed = 0
@classmethod
def setUpClass(cls):
    """Force deterministic torch kernels so test outputs are reproducible.

    `torch.use_deterministic_algorithms` flips a process-wide flag; without
    cleanup it would leak into every test class that runs afterwards. We
    therefore snapshot the current setting and register a class-level
    cleanup (runs after tearDownClass) that restores it.
    """
    prev = torch.are_deterministic_algorithms_enabled()
    cls.addClassCleanup(torch.use_deterministic_algorithms, prev)
    torch.use_deterministic_algorithms(True)
def get_dummy_inputs(self): def get_dummy_inputs(self):
prompt_embeds = load_pt( prompt_embeds = load_pt(
"https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt", "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt",
...@@ -485,7 +489,6 @@ class SlowBnb8bitTests(Base8bitTests): ...@@ -485,7 +489,6 @@ class SlowBnb8bitTests(Base8bitTests):
r""" r"""
Test that loading the model and unquantize it produce correct results. Test that loading the model and unquantize it produce correct results.
""" """
torch.use_deterministic_algorithms(True)
self.pipeline_8bit.transformer.dequantize() self.pipeline_8bit.transformer.dequantize()
output = self.pipeline_8bit( output = self.pipeline_8bit(
prompt=self.prompt, prompt=self.prompt,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment