"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "03bddc375bb1173c06512289759208f52b200f0f"
Unverified commit db7d1554, authored by Yih-Dar, committed by GitHub

Fix/Update for doctest (#30216)



fix
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 4f7b434a
````diff
@@ -724,7 +724,7 @@ class CTRLForSequenceClassification(CTRLPreTrainedModel):
 >>> labels = torch.tensor(1)
 >>> loss = model(**inputs, labels=labels).loss
 >>> round(loss.item(), 2)
-0.35
+0.93
 ```
 Example of multi-label classification:
````
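For reference, the changed value above is the loss of the single-label classification example in the CTRL docstring. A minimal sketch of that surrounding doctest follows; the `Salesforce/ctrl` checkpoint and the input sentence are assumptions based on the usual transformers examples, not taken from this diff.

```python
# Sketch only: checkpoint name and example sentence are assumed, not from the diff.
import torch
from transformers import AutoTokenizer, CTRLForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("Salesforce/ctrl")
model = CTRLForSequenceClassification.from_pretrained("Salesforce/ctrl")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")

# single-label classification: the label is an integer class index
labels = torch.tensor(1)
loss = model(**inputs, labels=labels).loss
print(round(loss.item(), 2))  # the doctest now expects 0.93 instead of 0.35
```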
````diff
@@ -732,7 +732,7 @@ class DeiTForImageClassification(DeiTPreTrainedModel):
 >>> # model predicts one of the 1000 ImageNet classes
 >>> predicted_class_idx = logits.argmax(-1).item()
 >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
-Predicted class: magpie
+Predicted class: Polaroid camera, Polaroid Land camera
 ```"""
 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
````
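The DeiT change only swaps the expected ImageNet label. A rough sketch of the doctest it belongs to is shown below; the checkpoint name and COCO image URL are assumptions, following the standard transformers image-classification examples.

```python
# Sketch only: checkpoint and image URL are assumed, not taken from the diff.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DeiTForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassification.from_pretrained("facebook/deit-base-distilled-patch16-224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# model predicts one of the 1000 ImageNet classes
predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```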
````diff
@@ -1995,10 +1995,11 @@ class DetaForObjectDetection(DetaPreTrainedModel):
 ... f"Detected {model.config.id2label[label.item()]} with confidence "
 ... f"{round(score.item(), 3)} at location {box}"
 ... )
-Detected cat with confidence 0.683 at location [345.85, 23.68, 639.86, 372.83]
-Detected cat with confidence 0.683 at location [8.8, 52.49, 316.93, 473.45]
-Detected remote with confidence 0.568 at location [40.02, 73.75, 175.96, 117.33]
-Detected remote with confidence 0.546 at location [333.68, 77.13, 370.12, 187.51]
+Detected cat with confidence 0.802 at location [9.87, 54.36, 316.93, 473.44]
+Detected cat with confidence 0.795 at location [346.62, 24.35, 639.62, 373.2]
+Detected remote with confidence 0.725 at location [40.41, 73.36, 175.77, 117.29]
+Detected remote with confidence 0.638 at location [333.34, 76.81, 370.22, 187.94]
+Detected couch with confidence 0.584 at location [0.03, 0.99, 640.02, 474.93]
 ```"""
 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
````
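The detection lines above come from the standard object-detection post-processing loop around the changed expected output. A sketch of that loop follows; the checkpoint name, image URL, and threshold value are assumptions, not taken from this diff.

```python
# Sketch only: checkpoint, image URL, and threshold are assumed.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DetaForObjectDetection

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large")
model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# convert raw logits/boxes to (score, label, box) triples, keeping detections above the threshold
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    box = [round(i, 2) for i in box.tolist()]
    print(
        f"Detected {model.config.id2label[label.item()]} with confidence "
        f"{round(score.item(), 3)} at location {box}"
    )
```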
````diff
@@ -106,15 +106,15 @@ class GPTSanJapaneseTokenizer(PreTrainedTokenizer):
 >>> tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
 >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["input_ids"]
-[[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
+[[35993, 35998, 8640, 25948, 35993, 35998, 30647, 35675, 35999, 35999], [35993, 35998, 10382, 9868, 35993, 35998, 30646, 9459, 30646, 35675]]
 >>> # Mask for Prefix-LM inputs
 >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["token_type_ids"]
-[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
+[[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
 >>> # Mask for padding
 >>> tokenizer([["武田信玄", "は、"], ["織田信長", "の配下の、"]], padding=True)["attention_mask"]
-[[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
+[[1, 1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
 ```
 Args:
````
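The three changed lists above are the outputs of the same batched Prefix-LM call queried three times. A small sketch that reproduces them in a single pass; it only reorganizes what the docstring above already shows.

```python
# Reuses the docstring's checkpoint and inputs; only the structure is new.
from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")

# each inner list is [prefix, text] in the Prefix-LM input format
batch = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
encoded = tokenizer(batch, padding=True)

print(encoded["input_ids"])       # updated expected ids shown in the diff
print(encoded["token_type_ids"])  # mask for Prefix-LM inputs
print(encoded["attention_mask"])  # mask for padding
```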
````diff
@@ -636,7 +636,7 @@ class ViTMSNForImageClassification(ViTMSNPreTrainedModel):
 >>> # model predicts one of the 1000 ImageNet classes
 >>> predicted_label = logits.argmax(-1).item()
 >>> print(model.config.id2label[predicted_label])
-Kerry blue terrier
+tusker
 ```"""
 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
````
````diff
@@ -422,7 +422,7 @@ class WhisperGenerationMixin:
 >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
 >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
->>> model.cuda()
+>>> model.cuda()  # doctest: +IGNORE_RESULT
 >>> # load audios > 30 seconds
 >>> ds = load_dataset("distil-whisper/meanwhile", "default")["test"]
````
````diff
@@ -441,7 +441,7 @@ class WhisperGenerationMixin:
 >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
 >>> transcription[0]
-' Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct dennies. set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile!'
+" Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct dennies. set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile."
 ```
 - *Shortform transcription*: If passed mel input features are < 30 seconds, the whole audio will be transcribed with a single call to generate.
````
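For context, the two Whisper hunks belong to the long-form transcription example. A rough end-to-end sketch of that setup is given below; the audio slicing, the `return_timestamps=True` argument, and the exact processor arguments are assumptions based on the surrounding doctest, not a verbatim copy of it.

```python
# Sketch only: mirrors the long-form doctest under stated assumptions; requires a CUDA GPU.
import torch
from datasets import Audio, load_dataset
from transformers import AutoProcessor, WhisperForConditionalGeneration

processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
model.to("cuda")

# load audios > 30 seconds and make sure they are sampled at 16 kHz
ds = load_dataset("distil-whisper/meanwhile", "default")["test"]
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
audio = [x["array"] for x in ds[:1]["audio"]]

# do NOT truncate to 30 s: long-form generation needs the full feature sequence
inputs = processor(
    audio,
    return_tensors="pt",
    truncation=False,
    padding="longest",
    return_attention_mask=True,
    sampling_rate=16_000,
)
inputs = inputs.to("cuda", torch.float32)

# timestamps are used to stitch the 30 s segments together
generated_ids = model.generate(**inputs, return_timestamps=True)
transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
print(transcription[0])
```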
````diff
@@ -769,11 +769,11 @@ class YolosForObjectDetection(YolosPreTrainedModel):
 ... f"Detected {model.config.id2label[label.item()]} with confidence "
 ... f"{round(score.item(), 3)} at location {box}"
 ... )
-Detected remote with confidence 0.994 at location [46.96, 72.61, 181.02, 119.73]
-Detected remote with confidence 0.975 at location [340.66, 79.19, 372.59, 192.65]
-Detected cat with confidence 0.984 at location [12.27, 54.25, 319.42, 470.99]
-Detected remote with confidence 0.922 at location [41.66, 71.96, 178.7, 120.33]
-Detected cat with confidence 0.914 at location [342.34, 21.48, 638.64, 372.46]
+Detected remote with confidence 0.991 at location [46.48, 72.78, 178.98, 119.3]
+Detected remote with confidence 0.908 at location [336.48, 79.27, 368.23, 192.36]
+Detected cat with confidence 0.934 at location [337.18, 18.06, 638.14, 373.09]
+Detected cat with confidence 0.979 at location [10.93, 53.74, 313.41, 470.67]
+Detected remote with confidence 0.974 at location [41.63, 72.23, 178.09, 119.99]
 ```"""
 return_dict = return_dict if return_dict is not None else self.config.use_return_dict
````