Unverified commit ec59a421 authored by Matt, committed by GitHub

Revert workaround for TF safetensors loading (#30128)

* See if we can get tests to pass with the fixed weights

* See if we can get tests to pass with the fixed weights

* Replace the revisions now that we don't need them anymore
parent 841e87ef
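For context: in `transformers`, `from_pretrained` accepts a `use_safetensors` argument. `True` forces loading from safetensors weights, `False` forces the framework-native checkpoint, and leaving it unset lets the library prefer safetensors when available. The workaround reverted here pinned that flag in tests so the TF classes would avoid the then-broken safetensors weights on the Hub. A minimal sketch of the difference, using the same tiny test checkpoint as the diff below (TF line assumes TensorFlow is installed):

```python
from transformers import AutoModelForCausalLM, TFAutoModelForCausalLM

repo = "hf-internal-testing/tiny-random-gpt2"

# Default behaviour: transformers picks the checkpoint format itself,
# preferring model.safetensors when the repo provides one.
pt_model = AutoModelForCausalLM.from_pretrained(repo)

# The reverted workaround pinned the format explicitly, e.g. forcing the
# TF class to skip safetensors while the converted weights were broken:
tf_model = TFAutoModelForCausalLM.from_pretrained(repo, use_safetensors=False)
```

With the weights on the Hub fixed, the tests can return to the default loading path, which is what this commit restores.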
@@ -111,7 +111,7 @@ class GenerationIntegrationTestsMixin:
         article = """Justin Timberlake."""
         gpt2_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
-        gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        gpt2_model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         input_ids = gpt2_tokenizer(article, return_tensors=return_tensors).input_ids
         if is_pt:
             gpt2_model = gpt2_model.to(torch_device)
@@ -582,7 +582,7 @@ class GenerationIntegrationTestsMixin:
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors=return_tensors)
-        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         if is_pt:
             model = model.to(torch_device)
             tokens = tokens.to(torch_device)
@@ -611,7 +611,7 @@ class GenerationIntegrationTestsMixin:
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors=return_tensors)
-        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         if is_pt:
             model = model.to(torch_device)
             tokens = tokens.to(torch_device)
@@ -638,7 +638,7 @@ class GenerationIntegrationTestsMixin:
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors=return_tensors)
-        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=is_pt)
+        model = model_cls.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         if is_pt:
             model = model.to(torch_device)
             tokens = tokens.to(torch_device)
...
@@ -194,7 +194,7 @@ class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
         tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         text = """Hello, my dog is cute and"""
         tokens = tokenizer(text, return_tensors="tf")
-        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2", use_safetensors=False)
+        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
         eos_token_id = 638
         # forces the generation to happen on CPU, to avoid GPU-related quirks
...
@@ -268,7 +268,6 @@ class TextGenerationPipelineTests(unittest.TestCase):
         text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
         return text_generator, ["This is a test", "Another test"]

-    @require_torch  # See https://github.com/huggingface/transformers/issues/30117
     def test_stop_sequence_stopping_criteria(self):
         prompt = """Hello I believe in"""
         text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
...
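For reference, the `require_torch` decorator removed in the last hunk comes from `transformers.testing_utils` and skips a test entirely when PyTorch is not installed; it had been added as a temporary guard (see issue 30117 linked in the removed line) while the TF loading path was broken. A minimal sketch of its effect, using a hypothetical test class:

```python
import unittest

from transformers.testing_utils import require_torch


class ExampleTests(unittest.TestCase):
    # Hypothetical test: runs only when PyTorch is importable,
    # and is reported as skipped otherwise.
    @require_torch
    def test_needs_pytorch(self):
        import torch

        self.assertEqual(torch.tensor([1.0]).numel(), 1)
```

Dropping the decorator lets `test_stop_sequence_stopping_criteria` run again on TensorFlow-only setups.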