Unverified Commit 2c668423 authored by Sebastian Olsson's avatar Sebastian Olsson Committed by GitHub
Browse files

Correct AutoConfig call docstrings (#10822)

parent 8fb46718
...@@ -78,7 +78,7 @@ def model(*args, **kwargs): ...@@ -78,7 +78,7 @@ def model(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attentions=True) # Update configuration during loading model = torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower) # Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) model = torch.hub.load('huggingface/transformers', 'model', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
...@@ -97,7 +97,7 @@ def modelWithLMHead(*args, **kwargs): ...@@ -97,7 +97,7 @@ def modelWithLMHead(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attentions=True) # Update configuration during loading model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower) # Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) model = torch.hub.load('huggingface/transformers', 'modelWithLMHead', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
...@@ -115,7 +115,7 @@ def modelForSequenceClassification(*args, **kwargs): ...@@ -115,7 +115,7 @@ def modelForSequenceClassification(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True) # Update configuration during loading model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower) # Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) model = torch.hub.load('huggingface/transformers', 'modelForSequenceClassification', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
...@@ -134,7 +134,7 @@ def modelForQuestionAnswering(*args, **kwargs): ...@@ -134,7 +134,7 @@ def modelForQuestionAnswering(*args, **kwargs):
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True) # Update configuration during loading model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', 'bert-base-uncased', output_attentions=True) # Update configuration during loading
assert model.config.output_attentions == True assert model.config.output_attentions == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower) # Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) model = torch.hub.load('huggingface/transformers', 'modelForQuestionAnswering', './tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
......
...@@ -801,7 +801,7 @@ class AutoModel: ...@@ -801,7 +801,7 @@ class AutoModel:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -895,7 +895,7 @@ class AutoModelForPreTraining: ...@@ -895,7 +895,7 @@ class AutoModelForPreTraining:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1000,7 +1000,7 @@ class AutoModelWithLMHead: ...@@ -1000,7 +1000,7 @@ class AutoModelWithLMHead:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
warnings.warn( warnings.warn(
...@@ -1099,7 +1099,7 @@ class AutoModelForCausalLM: ...@@ -1099,7 +1099,7 @@ class AutoModelForCausalLM:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/gpt2_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/gpt2_tf_model_config.json')
>>> model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1192,7 +1192,7 @@ class AutoModelForMaskedLM: ...@@ -1192,7 +1192,7 @@ class AutoModelForMaskedLM:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1288,7 +1288,7 @@ class AutoModelForSeq2SeqLM: ...@@ -1288,7 +1288,7 @@ class AutoModelForSeq2SeqLM:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/t5_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/t5_tf_model_config.json')
>>> model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1386,7 +1386,7 @@ class AutoModelForSequenceClassification: ...@@ -1386,7 +1386,7 @@ class AutoModelForSequenceClassification:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1483,7 +1483,7 @@ class AutoModelForQuestionAnswering: ...@@ -1483,7 +1483,7 @@ class AutoModelForQuestionAnswering:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1583,7 +1583,7 @@ class AutoModelForTableQuestionAnswering: ...@@ -1583,7 +1583,7 @@ class AutoModelForTableQuestionAnswering:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/tapas_tf_checkpoint.json') >>> config = AutoConfig.from_pretrained('./tf_model/tapas_tf_checkpoint.json')
>>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/tapas_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/tapas_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1681,7 +1681,7 @@ class AutoModelForTokenClassification: ...@@ -1681,7 +1681,7 @@ class AutoModelForTokenClassification:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1781,7 +1781,7 @@ class AutoModelForMultipleChoice: ...@@ -1781,7 +1781,7 @@ class AutoModelForMultipleChoice:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1881,7 +1881,7 @@ class AutoModelForNextSentencePrediction: ...@@ -1881,7 +1881,7 @@ class AutoModelForNextSentencePrediction:
True True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower) >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json') >>> config = AutoConfig.from_pretrained('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForNextSentencePrediction.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config) >>> model = AutoModelForNextSentencePrediction.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
......
...@@ -605,7 +605,7 @@ class TFAutoModel(object): ...@@ -605,7 +605,7 @@ class TFAutoModel(object):
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModel.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -699,7 +699,7 @@ class TFAutoModelForPreTraining(object): ...@@ -699,7 +699,7 @@ class TFAutoModelForPreTraining(object):
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForPreTraining.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForPreTraining.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -804,7 +804,7 @@ class TFAutoModelWithLMHead(object): ...@@ -804,7 +804,7 @@ class TFAutoModelWithLMHead(object):
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelWithLMHead.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelWithLMHead.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
warnings.warn( warnings.warn(
...@@ -904,7 +904,7 @@ class TFAutoModelForCausalLM: ...@@ -904,7 +904,7 @@ class TFAutoModelForCausalLM:
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/gpt2_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/gpt2_pt_model_config.json')
>>> model = TFAutoModelForCausalLM.from_pretrained('./pt_model/gpt2_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForCausalLM.from_pretrained('./pt_model/gpt2_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -997,7 +997,7 @@ class TFAutoModelForMaskedLM: ...@@ -997,7 +997,7 @@ class TFAutoModelForMaskedLM:
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForMaskedLM.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForMaskedLM.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1093,7 +1093,7 @@ class TFAutoModelForSeq2SeqLM: ...@@ -1093,7 +1093,7 @@ class TFAutoModelForSeq2SeqLM:
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/t5_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/t5_pt_model_config.json')
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained('./pt_model/t5_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForSeq2SeqLM.from_pretrained('./pt_model/t5_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1191,7 +1191,7 @@ class TFAutoModelForSequenceClassification(object): ...@@ -1191,7 +1191,7 @@ class TFAutoModelForSequenceClassification(object):
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForSequenceClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1288,7 +1288,7 @@ class TFAutoModelForQuestionAnswering(object): ...@@ -1288,7 +1288,7 @@ class TFAutoModelForQuestionAnswering(object):
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForQuestionAnswering.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1384,7 +1384,7 @@ class TFAutoModelForTokenClassification: ...@@ -1384,7 +1384,7 @@ class TFAutoModelForTokenClassification:
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForTokenClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForTokenClassification.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1482,7 +1482,7 @@ class TFAutoModelForMultipleChoice: ...@@ -1482,7 +1482,7 @@ class TFAutoModelForMultipleChoice:
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForMultipleChoice.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForMultipleChoice.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
...@@ -1580,7 +1580,7 @@ class TFAutoModelForNextSentencePrediction: ...@@ -1580,7 +1580,7 @@ class TFAutoModelForNextSentencePrediction:
True True
>>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower) >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
>>> config = AutoConfig.from_json_file('./pt_model/bert_pt_model_config.json') >>> config = AutoConfig.from_pretrained('./pt_model/bert_pt_model_config.json')
>>> model = TFAutoModelForNextSentencePrediction.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config) >>> model = TFAutoModelForNextSentencePrediction.from_pretrained('./pt_model/bert_pytorch_model.bin', from_pt=True, config=config)
""" """
config = kwargs.pop("config", None) config = kwargs.pop("config", None)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment