Commit ec276d6a, authored Oct 27, 2019 by Lorenzo Ampil
Add special tokens to documentation for the tensorflow model examples #1561
Parent: 6e011690
Showing 9 changed files with 31 additions and 31 deletions
transformers/modeling_tf_bert.py        +7 -7
transformers/modeling_tf_ctrl.py        +2 -2
transformers/modeling_tf_distilbert.py  +4 -4
transformers/modeling_tf_gpt2.py        +2 -2
transformers/modeling_tf_openai.py      +2 -2
transformers/modeling_tf_roberta.py     +3 -3
transformers/modeling_tf_transfo_xl.py  +2 -2
transformers/modeling_tf_xlm.py         +4 -4
transformers/modeling_tf_xlnet.py       +5 -5
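
Every file receives the same one-line documentation change: the tokenizer.encode(...) call in each usage example now passes add_special_tokens=True. For reference, below is a self-contained version of the updated TFBertModel example; the import lines are added here for completeness and are not part of the docstrings themselves.

import tensorflow as tf
from transformers import BertTokenizer, TFBertModel

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertModel.from_pretrained('bert-base-uncased')

# add_special_tokens=True wraps the sequence in BERT's special tokens ([CLS] ... [SEP])
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
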
transformers/modeling_tf_bert.py

@@ -647,7 +647,7 @@ class TFBertModel(TFBertPreTrainedModel):
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 model = TFBertModel.from_pretrained('bert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -686,7 +686,7 @@ class TFBertForPreTraining(TFBertPreTrainedModel):
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 model = TFBertForPreTraining.from_pretrained('bert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 prediction_scores, seq_relationship_scores = outputs[:2]

@@ -732,7 +732,7 @@ class TFBertForMaskedLM(TFBertPreTrainedModel):
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 model = TFBertForMaskedLM.from_pretrained('bert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 prediction_scores = outputs[0]

@@ -776,7 +776,7 @@ class TFBertForNextSentencePrediction(TFBertPreTrainedModel):
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 model = TFBertForNextSentencePrediction.from_pretrained('bert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 seq_relationship_scores = outputs[0]

@@ -821,7 +821,7 @@ class TFBertForSequenceClassification(TFBertPreTrainedModel):
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 model = TFBertForSequenceClassification.from_pretrained('bert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 logits = outputs[0]

@@ -952,7 +952,7 @@ class TFBertForTokenClassification(TFBertPreTrainedModel):
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 model = TFBertForTokenClassification.from_pretrained('bert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 scores = outputs[0]

@@ -1005,7 +1005,7 @@ class TFBertForQuestionAnswering(TFBertPreTrainedModel):
 tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
 model = TFBertForQuestionAnswering.from_pretrained('bert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 start_scores, end_scores = outputs[:2]
transformers/modeling_tf_ctrl.py

@@ -402,7 +402,7 @@ class TFCTRLModel(TFCTRLPreTrainedModel):
 tokenizer = CTRLTokenizer.from_pretrained('ctrl')
 model = TFCTRLModel.from_pretrained('ctrl')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -465,7 +465,7 @@ class TFCTRLLMHeadModel(TFCTRLPreTrainedModel):
 tokenizer = CTRLTokenizer.from_pretrained('ctrl')
 model = TFCTRLLMHeadModel.from_pretrained('ctrl')
-input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute")).unsqueeze(0) # Batch size 1
+input_ids = torch.tensor(tokenizer.encode("Links Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
 outputs = model(input_ids, labels=input_ids)
 loss, logits = outputs[:2]
transformers/modeling_tf_distilbert.py

@@ -532,7 +532,7 @@ class TFDistilBertModel(TFDistilBertPreTrainedModel):
 tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
 model = TFDistilBertModel.from_pretrained('distilbert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -590,7 +590,7 @@ class TFDistilBertForMaskedLM(TFDistilBertPreTrainedModel):
 tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
 model = TFDistilBertForMaskedLM.from_pretrained('distilbert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 prediction_scores = outputs[0]

@@ -645,7 +645,7 @@ class TFDistilBertForSequenceClassification(TFDistilBertPreTrainedModel):
 tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
 model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 logits = outputs[0]

@@ -702,7 +702,7 @@ class TFDistilBertForQuestionAnswering(TFDistilBertPreTrainedModel):
 tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
 model = TFDistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 start_scores, end_scores = outputs[:2]
transformers/modeling_tf_gpt2.py

@@ -436,7 +436,7 @@ class TFGPT2Model(TFGPT2PreTrainedModel):
 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
 model = TFGPT2Model.from_pretrained('gpt2')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -477,7 +477,7 @@ class TFGPT2LMHeadModel(TFGPT2PreTrainedModel):
 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
 model = TFGPT2LMHeadModel.from_pretrained('gpt2')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 logits = outputs[0]
transformers/modeling_tf_openai.py

@@ -413,7 +413,7 @@ class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
 tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
 model = TFOpenAIGPTModel.from_pretrained('openai-gpt')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -449,7 +449,7 @@ class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel):
 tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
 model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 logits = outputs[0]
transformers/modeling_tf_roberta.py

@@ -204,7 +204,7 @@ class TFRobertaModel(TFRobertaPreTrainedModel):
 tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
 model = TFRobertaModel.from_pretrained('roberta-base')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -281,7 +281,7 @@ class TFRobertaForMaskedLM(TFRobertaPreTrainedModel):
 tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
 model = TFRobertaForMaskedLM.from_pretrained('roberta-base')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids, masked_lm_labels=input_ids)
 prediction_scores = outputs[0]

@@ -349,7 +349,7 @@ class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel):
 tokenizer = RoertaTokenizer.from_pretrained('roberta-base')
 model = TFRobertaForSequenceClassification.from_pretrained('roberta-base')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 labels = tf.constant([1])[None, :] # Batch size 1
 outputs = model(input_ids)
 logits = outputs[0]
transformers/modeling_tf_transfo_xl.py

@@ -654,7 +654,7 @@ class TFTransfoXLModel(TFTransfoXLPreTrainedModel):
 tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
 model = TFTransfoXLModel.from_pretrained('transfo-xl-wt103')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states, mems = outputs[:2]

@@ -696,7 +696,7 @@ class TFTransfoXLLMHeadModel(TFTransfoXLPreTrainedModel):
 tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
 model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 prediction_scores, mems = outputs[:2]
transformers/modeling_tf_xlm.py

@@ -550,7 +550,7 @@ class TFXLMModel(TFXLMPreTrainedModel):
 tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
 model = TFXLMModel.from_pretrained('xlm-mlm-en-2048')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -623,7 +623,7 @@ class TFXLMWithLMHeadModel(TFXLMPreTrainedModel):
 tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
 model = TFXLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -667,7 +667,7 @@ class TFXLMForSequenceClassification(TFXLMPreTrainedModel):
 tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
 model = TFXLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 labels = tf.constant([1])[None, :] # Batch size 1
 outputs = model(input_ids)
 logits = outputs[0]

@@ -715,7 +715,7 @@ class TFXLMForQuestionAnsweringSimple(TFXLMPreTrainedModel):
 tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
 model = TFXLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 start_scores, end_scores = outputs[:2]
transformers/modeling_tf_xlnet.py

@@ -791,7 +791,7 @@ class TFXLNetModel(TFXLNetPreTrainedModel):
 tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
 model = TFXLNetModel.from_pretrained('xlnet-large-cased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple

@@ -835,7 +835,7 @@ class TFXLNetLMHeadModel(TFXLNetPreTrainedModel):
 model = TFXLNetLMHeadModel.from_pretrained('xlnet-large-cased')
 # We show how to setup inputs to predict a next token using a bi-directional context.
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>"))[None, :] # We will predict the masked token
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[None, :] # We will predict the masked token
 perm_mask = tf.zeros((1, input_ids.shape[1], input_ids.shape[1]))
 perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
 target_mapping = tf.zeros((1, 1, input_ids.shape[1])) # Shape [1, 1, seq_length] => let's predict one token

@@ -888,7 +888,7 @@ class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel):
 tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
 model = TFXLNetForSequenceClassification.from_pretrained('xlnet-large-cased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 logits = outputs[0]

@@ -946,7 +946,7 @@ class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel):
 tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
 model = TFXLNetForQuestionAnsweringSimple.from_pretrained('xlnet-base-cased')
-input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 outputs = model(input_ids)
 start_scores, end_scores = outputs[:2]

@@ -1010,7 +1010,7 @@ class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel):
 # tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
 # model = XLMForQuestionAnswering.from_pretrained('xlnet-large-cased')
-# input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
+# input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
 # start_positions = tf.constant([1])
 # end_positions = tf.constant([3])
 # outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
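
The only behavioural difference in all of these examples is in the token ids that encode() returns. A quick way to see the effect for the BERT examples is sketched below; this snippet is for illustration only and is not part of the commit.

from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

plain_ids = tokenizer.encode("Hello, my dog is cute", add_special_tokens=False)
special_ids = tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)

# For BERT the second list is the first one wrapped in [CLS] ... [SEP],
# so it is exactly two ids longer.
assert len(special_ids) == len(plain_ids) + 2
print(tokenizer.decode(special_ids))  # roughly: "[CLS] hello, my dog is cute [SEP]"
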