Unverified Commit b9104896 authored by Joao Gante's avatar Joao Gante Committed by GitHub
Browse files

Generate: Fix CI related to #20727 (#21003)

parent 263fd3c4
...@@ -3035,7 +3035,7 @@ class GenerationIntegrationTests(unittest.TestCase): ...@@ -3035,7 +3035,7 @@ class GenerationIntegrationTests(unittest.TestCase):
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
text = """Hello, my dog is cute and""" text = """Hello, my dog is cute and"""
tokens = tokenizer(text, return_tensors="pt") tokens = tokenizer(text, return_tensors="pt").to(torch_device)
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
...@@ -3060,7 +3060,7 @@ class GenerationIntegrationTests(unittest.TestCase): ...@@ -3060,7 +3060,7 @@ class GenerationIntegrationTests(unittest.TestCase):
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
text = """Hello, my dog is cute and""" text = """Hello, my dog is cute and"""
tokens = tokenizer(text, return_tensors="pt") tokens = tokenizer(text, return_tensors="pt").to(torch_device)
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
...@@ -3086,7 +3086,7 @@ class GenerationIntegrationTests(unittest.TestCase): ...@@ -3086,7 +3086,7 @@ class GenerationIntegrationTests(unittest.TestCase):
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
text = """Hello, my dog is cute and""" text = """Hello, my dog is cute and"""
tokens = tokenizer(text, return_tensors="pt") tokens = tokenizer(text, return_tensors="pt").to(torch_device)
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
...@@ -3109,7 +3109,7 @@ class GenerationIntegrationTests(unittest.TestCase): ...@@ -3109,7 +3109,7 @@ class GenerationIntegrationTests(unittest.TestCase):
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
text = """Hello, my dog is cute and""" text = """Hello, my dog is cute and"""
tokens = tokenizer(text, return_tensors="pt") tokens = tokenizer(text, return_tensors="pt").to(torch_device)
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment