Unverified commit ae82ee6a, authored by Yih-Dar, committed by GitHub
Browse files

Fix doc examples: unexpected keyword argument (#14689)



* Fix doc examples: unexpected keyword argument

* Don't delete token_type_ids from inputs
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 5b004001
@@ -1262,7 +1262,7 @@ class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
 >>> # compute masked indices
 >>> batch_size, raw_sequence_length = input_values.shape
 >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)
->>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2, device=model.device)
+>>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
 >>> with torch.no_grad():
 ...     outputs = model(input_values, mask_time_indices=mask_time_indices)
...
@@ -1260,7 +1260,7 @@ class UniSpeechSatForPreTraining(UniSpeechSatPreTrainedModel):
 >>> # compute masked indices
 >>> batch_size, raw_sequence_length = input_values.shape
 >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)
->>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2, device=model.device)
+>>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
 >>> with torch.no_grad():
 ...     outputs = model(input_values, mask_time_indices=mask_time_indices)
...
@@ -1372,7 +1372,7 @@ class Wav2Vec2ForPreTraining(Wav2Vec2PreTrainedModel):
 >>> # compute masked indices
 >>> batch_size, raw_sequence_length = input_values.shape
 >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length)
->>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2, device=model.device)
+>>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2)
 >>> with torch.no_grad():
 ...     outputs = model(input_values, mask_time_indices=mask_time_indices)
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment