Unverified Commit 3d4b3bc3 authored by Stefan Schweter, committed by GitHub

examples: use correct way to get vocab size in flax lm readme (#12947)

parent 23d6761f
@@ -114,7 +114,7 @@ from transformers import RobertaConfig
 model_dir = "./norwegian-roberta-base" # ${MODEL_DIR}
-config = RobertaConfig.from_pretrained("roberta-base", vocab_size=tokenizer.vocab_size)
+config = RobertaConfig.from_pretrained("roberta-base", vocab_size=tokenizer.get_vocab_size())
 config.save_pretrained(model_dir)
 ```
@@ -349,7 +349,7 @@ from transformers import T5Config
 model_dir = "./norwegian-t5-base" # ${MODEL_DIR}
-config = T5Config.from_pretrained("google/t5-v1_1-base", vocab_size=tokenizer.vocab_size)
+config = T5Config.from_pretrained("google/t5-v1_1-base", vocab_size=tokenizer.get_vocab_size())
 config.save_pretrained(model_dir)
 ```
...
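
Why the change is needed, as a minimal sketch: the tokenizer in this README is built with the `tokenizers` library, whose `Tokenizer` objects expose the vocabulary size via `get_vocab_size()` rather than the `vocab_size` attribute found on `transformers` tokenizers. The corpus path, vocab size, and special tokens below are placeholder assumptions for illustration, not taken from the README.

```python
import os

from tokenizers import ByteLevelBPETokenizer
from transformers import RobertaConfig

model_dir = "./norwegian-roberta-base"  # ${MODEL_DIR}
os.makedirs(model_dir, exist_ok=True)

# Train a byte-level BPE tokenizer; "corpus.txt" and the settings are placeholders.
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
    files=["corpus.txt"],
    vocab_size=50265,
    min_frequency=2,
    special_tokens=["<s>", "<pad>", "</s>", "<unk>", "<mask>"],
)
tokenizer.save(f"{model_dir}/tokenizer.json")

# The tokenizers library exposes the trained vocabulary size through
# get_vocab_size(); tokenizer.vocab_size is the transformers-tokenizer
# attribute and is not the correct way to read it here.
config = RobertaConfig.from_pretrained("roberta-base", vocab_size=tokenizer.get_vocab_size())
config.save_pretrained(model_dir)
```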