chenpangpang / transformers / Commits / 532a81d3

Commit 532a81d3, authored Nov 30, 2018 by thomwolf
fixed doc_strings
parent 296f0061

Showing 1 changed file with 17 additions and 17 deletions:
pytorch_pretrained_bert/modeling.py  (+17, -17)
@@ -569,10 +569,10 @@ class BertModel(PreTrainedBertModel):
     # Already been converted into WordPiece token ids
     input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
     input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
-    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
-    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
-        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
+    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
+    config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
+        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
     model = modeling.BertModel(config=config)
     all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
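The commit replaces made-up hyperparameters in the docstring examples with the actual BERT-base values (hidden_size=768, 12 layers, 12 attention heads, intermediate_size=3072), uses the keyword argument the constructor really accepts (vocab_size_or_config_json_file), and fixes the segment ids, which must be 0 or 1. A minimal runnable sketch of the corrected BertModel example, assuming pytorch_pretrained_bert is installed; the token ids are illustrative placeholders, not real WordPiece ids:

import torch
from pytorch_pretrained_bert import modeling

# Batch of 2 sequences, 3 tokens each, already converted to ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])  # segment ids are 0 or 1

# BERT-base sized configuration, using the constructor's real keyword
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                             num_hidden_layers=12, num_attention_heads=12,
                             intermediate_size=3072)

model = modeling.BertModel(config=config)
model.eval()
with torch.no_grad():
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)

# One hidden-state tensor per layer, each [batch, seq_len, hidden_size]
assert len(all_encoder_layers) == config.num_hidden_layers
assert all_encoder_layers[-1].shape == (2, 3, 768)
assert pooled_output.shape == (2, 768)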
@@ -658,10 +658,10 @@ class BertForPreTraining(PreTrainedBertModel):
     # Already been converted into WordPiece token ids
     input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
     input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
-    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
-    config = BertConfig(vocab_size=32000, hidden_size=512,
-        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
+    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
+    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
+        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
     model = BertForPreTraining(config)
     masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
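The BertForPreTraining example gets the same fix. As a sketch of how the head is used beyond the docstring, the snippet below also passes labels; the masked_lm_labels and next_sentence_label keywords, and the convention that -1 marks positions ignored by the masked-LM loss, are assumptions about this version's forward signature rather than part of the diff:

import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining

input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                    num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)

# Without labels: logits for both pre-training heads
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)

# With labels (assumed keywords): a single combined pre-training loss;
# -1 marks tokens that should not contribute to the masked-LM loss
masked_lm_labels = torch.LongTensor([[-1, -1, 99], [-1, 5, -1]])
next_sentence_label = torch.LongTensor([0, 1])
loss = model(input_ids, token_type_ids, input_mask,
             masked_lm_labels=masked_lm_labels, next_sentence_label=next_sentence_label)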
@@ -721,10 +721,10 @@ class BertForMaskedLM(PreTrainedBertModel):
     # Already been converted into WordPiece token ids
     input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
     input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
-    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
-    config = BertConfig(vocab_size=32000, hidden_size=512,
-        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
+    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
+    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
+        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
     model = BertForMaskedLM(config)
     masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
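The BertForMaskedLM example now builds a BERT-base sized model and returns vocabulary logits for every position. A hedged sketch of the training-style call, assuming the optional masked_lm_labels argument of this version, with -1 marking positions excluded from the loss:

import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertForMaskedLM

input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                    num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)

# Without labels: vocabulary logits for every position, [batch, seq_len, vocab_size]
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)

# With labels (assumed keyword): the masked-LM loss; -1 positions are ignored
masked_lm_labels = torch.LongTensor([[-1, 51, -1], [15, -1, -1]])
masked_lm_loss = model(input_ids, token_type_ids, input_mask, masked_lm_labels=masked_lm_labels)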
@@ -785,8 +785,8 @@ class BertForNextSentencePrediction(PreTrainedBertModel):
     input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
     token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
-    config = BertConfig(vocab_size=32000, hidden_size=512,
-        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
+    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
+        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
     model = BertForNextSentencePrediction(config)
     seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
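Only the config lines change in the BertForNextSentencePrediction example, since its segment ids were already valid. A sketch of inference versus a training-style call, assuming the forward method accepts an optional next_sentence_label and returns the classification loss when it is given:

import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertForNextSentencePrediction

input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                    num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)

# Without a label: two-way relationship logits per pair, shape [batch, 2]
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)

# With a 0/1 label per pair (assumed keyword): the classification loss
next_sentence_label = torch.LongTensor([0, 1])
loss = model(input_ids, token_type_ids, input_mask, next_sentence_label=next_sentence_label)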
@@ -845,10 +845,10 @@ class BertForSequenceClassification(PreTrainedBertModel):
     # Already been converted into WordPiece token ids
     input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
     input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
-    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
-    config = BertConfig(vocab_size=32000, hidden_size=512,
-        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
+    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
+    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
+        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
     num_labels = 2
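The visible hunk for BertForSequenceClassification stops at num_labels = 2, before the model is built. The sketch below fills in the usual pattern for this version as an assumption, not a quote from the file: a constructor taking (config, num_labels) and an optional labels argument that turns the call into a cross-entropy loss:

import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertForSequenceClassification

input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                    num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)

num_labels = 2
model = BertForSequenceClassification(config, num_labels)  # assumed constructor

# Without labels: classification logits, shape [batch, num_labels]
logits = model(input_ids, token_type_ids, input_mask)

# With labels (assumed keyword): cross-entropy loss over the two classes
labels = torch.LongTensor([1, 0])
loss = model(input_ids, token_type_ids, input_mask, labels=labels)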
@@ -989,10 +989,10 @@ class BertForQuestionAnswering(PreTrainedBertModel):
     # Already been converted into WordPiece token ids
     input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
     input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
-    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 2, 0]])
-    config = BertConfig(vocab_size=32000, hidden_size=512,
-        num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
+    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
+    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
+        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
     model = BertForQuestionAnswering(config)
     start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
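The BertForQuestionAnswering example returns per-token start and end logits. A sketch of the training-style call, assuming this version's forward accepts optional start_positions and end_positions and then returns the span-prediction loss:

import torch
from pytorch_pretrained_bert.modeling import BertConfig, BertForQuestionAnswering

input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])

config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                    num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)

# Without positions: one start logit and one end logit per token, each [batch, seq_len]
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)

# With gold span indices (assumed keywords): the summed start/end span loss
start_positions = torch.LongTensor([1, 0])
end_positions = torch.LongTensor([2, 1])
loss = model(input_ids, token_type_ids, input_mask,
             start_positions=start_positions, end_positions=end_positions)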