Unverified Commit db5e0c32 authored by Yih-Dar's avatar Yih-Dar Committed by GitHub
Browse files

Fix `MistralIntegrationTest` OOM (#26754)



* fix

* fix

* fix

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 72256bc7
......@@ -15,6 +15,7 @@
""" Testing suite for the PyTorch Mistral model. """
import gc
import tempfile
import unittest
......@@ -447,17 +448,23 @@ class MistralIntegrationTest(unittest.TestCase):
print(out[0, 0, :30])
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)
del model
torch.cuda.empty_cache()
gc.collect()
@slow
def test_model_7b_generation(self):
    """Greedy-decode 20 new tokens from Mistral-7B-v0.1 and compare against a pinned completion.

    Regression test for the expected greedy output; also exercises the
    OOM-safe loading path (`device_map="auto"`) introduced in #26754.
    """
    # NOTE(review): expected text was re-pinned in this commit; the old
    # mayonnaise completion belonged to the pre-device_map load path.
    EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% ketchup. I love it on everything. I’m not a big"""
    prompt = "My favourite condiment is "
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(torch_device)
    # device_map="auto" lets accelerate place/offload the 7B weights instead of
    # forcing the whole checkpoint onto a single device, avoiding the OOM this
    # commit fixes. Load the model exactly once.
    model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto")

    # greedy generation outputs (temperature=0 with default do_sample=False)
    generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
    text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    self.assertEqual(EXPECTED_TEXT_COMPLETION, text)

    # Release GPU memory so subsequent tests in the integration suite don't OOM.
    del model
    torch.cuda.empty_cache()
    gc.collect()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment