"docs/vscode:/vscode.git/clone" did not exist on "dadc9fb4275f4b7c2984d16d6d9a7880ec76d872"
Unverified commit 5b45422b, authored by Thomas Wang and committed by GitHub

Remove n_ctx from configs (#14165)

* Remove n_ctx from configs

* Fix GPTJ and OpenAIGPT; both are acceptable breaking changes, as there are no existing configs that this breaks

* Remove unnecessary n_positions from TFOpenAIGPT
parent be236361
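
For reference, a minimal sketch of config construction after this change, using the same tiny-GPT-2 values as the test diffs below (the snippet itself is illustrative, not part of the commit):

from transformers import GPT2Config, GPT2LMHeadModel

# n_positions alone now defines the context window; n_ctx is no longer
# a config argument.
config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
tiny_gpt2 = GPT2LMHeadModel(config)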
@@ -98,7 +98,6 @@ class TFOpenAIGPTModelTester:
             # hidden_dropout_prob=self.hidden_dropout_prob,
             # attention_probs_dropout_prob=self.attention_probs_dropout_prob,
             n_positions=self.max_position_embeddings,
-            n_ctx=self.max_position_embeddings,
             # type_vocab_size=self.type_vocab_size,
             # initializer_range=self.initializer_range,
             pad_token_id=self.pad_token_id,
@@ -490,7 +490,7 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
         _ = trainer.predict(eval_dataset)

     def test_evaluation_with_keys_to_drop(self):
-        config = GPT2Config(vocab_size=100, n_positions=128, n_ctx=128, n_embd=32, n_layer=3, n_head=4)
+        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
         tiny_gpt2 = GPT2LMHeadModel(config)
         x = torch.randint(0, 100, (128,))
         eval_dataset = RepeatDataset(x)
@@ -531,7 +531,7 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
         self.assertEqual(train_output.global_step, 10)

     def test_logging_inf_nan_filter(self):
-        config = GPT2Config(vocab_size=100, n_positions=128, n_ctx=128, n_embd=32, n_layer=3, n_head=4)
+        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
         tiny_gpt2 = GPT2LMHeadModel(config)
         x = torch.randint(0, 100, (128,))
         train_dataset = RepeatDataset(x)
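
For downstream code that still reads the removed attribute, a hedged migration sketch; the fallback to n_ctx is an assumption for configs serialized before this change, not something this commit provides:

# Prefer n_positions; fall back to a legacy n_ctx attribute if an old
# serialized config still carries one (assumption, not guaranteed).
context_len = getattr(config, "n_positions", None) or getattr(config, "n_ctx", None)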