chenpangpang / transformers · Commits · 40ea9ab2

Unverified commit 40ea9ab2, authored Oct 12, 2023 by Tom Aarsen, committed by GitHub on Oct 12, 2023
Parent: 3bc65505

Add many missing spaces in adjacent strings (#26751)

Add missing spaces in adjacent strings
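For context: Python implicitly concatenates adjacent string literals with no separator, so a message split across several literals silently runs its words together unless each fragment ends in an explicit space. A minimal sketch of the pattern this commit fixes (strings shortened, not copied verbatim from the diff):

    # Adjacent string literals are concatenated with nothing in between.
    broken = (
        "Make sure the repository contains"    # no trailing space
        "the files of at least one of those classes."
    )
    print(broken)  # ...repository containsthe files...

    # The fix applied throughout this commit: end each fragment with a space.
    fixed = (
        "Make sure the repository contains "
        "the files of at least one of those classes."
    )
    print(fixed)   # ...repository contains the files...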
Changes: 154 · Showing 20 changed files with 27 additions and 27 deletions (+27 -27)
src/transformers/models/auto/processing_auto.py  +1 -1
src/transformers/models/codegen/tokenization_codegen_fast.py  +1 -1
src/transformers/models/conditional_detr/image_processing_conditional_detr.py  +1 -1
src/transformers/models/data2vec/modeling_data2vec_audio.py  +4 -4
src/transformers/models/deformable_detr/image_processing_deformable_detr.py  +1 -1
src/transformers/models/deta/image_processing_deta.py  +1 -1
src/transformers/models/detr/image_processing_detr.py  +1 -1
src/transformers/models/esm/modeling_esmfold.py  +1 -1
src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py  +1 -1
src/transformers/models/groupvit/modeling_tf_groupvit.py  +1 -1
src/transformers/models/hubert/modeling_hubert.py  +2 -2
src/transformers/models/hubert/modeling_tf_hubert.py  +1 -1
src/transformers/models/idefics/vision.py  +2 -2
src/transformers/models/longt5/configuration_longt5.py  +1 -1
src/transformers/models/longt5/modeling_longt5.py  +1 -1
src/transformers/models/mt5/configuration_mt5.py  +1 -1
src/transformers/models/mt5/modeling_mt5.py  +1 -1
src/transformers/models/musicgen/modeling_musicgen.py  +3 -3
src/transformers/models/oneformer/convert_to_hf_oneformer.py  +1 -1
src/transformers/models/pix2struct/modeling_pix2struct.py  +1 -1
src/transformers/models/auto/processing_auto.py

@@ -314,7 +314,7 @@ class AutoProcessor:
         raise ValueError(
             f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a "
-            "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains"
+            "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains "
             "the files of at least one of those processing classes."
         )
src/transformers/models/codegen/tokenization_codegen_fast.py

@@ -144,7 +144,7 @@ class CodeGenTokenizerFast(PreTrainedTokenizerFast):
         if kwargs.pop("add_bos_token", False):
             model_id = kwargs.pop("name_or_path", "")
             raise ValueError(
-                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
+                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token. "
                 "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                 f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                 f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
src/transformers/models/conditional_detr/image_processing_conditional_detr.py

@@ -1233,7 +1233,7 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
         if annotations is not None:
             if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations):
                 raise ValueError(
-                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts"
+                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts "
                     "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
                     "being a list of annotations in the COCO format."
                 )
src/transformers/models/data2vec/modeling_data2vec_audio.py

@@ -991,7 +991,7 @@ class Data2VecAudioForCTC(Data2VecAudioPreTrainedModel):
         not be updated during training.
         """
         warnings.warn(
-            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
             "Please use the equivalent `freeze_feature_encoder` method instead.",
             FutureWarning,
         )

@@ -1116,7 +1116,7 @@ class Data2VecAudioForSequenceClassification(Data2VecAudioPreTrainedModel):
         not be updated during training.
         """
         warnings.warn(
-            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
             "Please use the equivalent `freeze_feature_encoder` method instead.",
             FutureWarning,
         )

@@ -1237,7 +1237,7 @@ class Data2VecAudioForAudioFrameClassification(Data2VecAudioPreTrainedModel):
         not be updated during training.
         """
         warnings.warn(
-            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
             "Please use the equivalent `freeze_feature_encoder` method instead.",
             FutureWarning,
         )

@@ -1403,7 +1403,7 @@ class Data2VecAudioForXVector(Data2VecAudioPreTrainedModel):
         not be updated during training.
         """
         warnings.warn(
-            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
             "Please use the equivalent `freeze_feature_encoder` method instead.",
             FutureWarning,
         )
src/transformers/models/deformable_detr/image_processing_deformable_detr.py

@@ -1231,7 +1231,7 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
         if annotations is not None:
             if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations):
                 raise ValueError(
-                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts"
+                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts "
                     "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
                     "being a list of annotations in the COCO format."
                 )
src/transformers/models/deta/image_processing_deta.py

@@ -895,7 +895,7 @@ class DetaImageProcessor(BaseImageProcessor):
         if annotations is not None:
             if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations):
                 raise ValueError(
-                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts"
+                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts "
                     "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
                     "being a list of annotations in the COCO format."
                 )
src/transformers/models/detr/image_processing_detr.py

@@ -1203,7 +1203,7 @@ class DetrImageProcessor(BaseImageProcessor):
         if annotations is not None:
             if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations):
                 raise ValueError(
-                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts"
+                    "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts "
                     "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
                     "being a list of annotations in the COCO format."
                 )
src/transformers/models/esm/modeling_esmfold.py

@@ -1204,7 +1204,7 @@ class EsmFoldTriangularSelfAttentionBlock(nn.Module):
         if sequence_state_dim != self.config.sequence_state_dim:
             raise ValueError(
-                "`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got"
+                "`sequence_state` last dimension should be equal to `self.sequence_state_dim`. Got "
                 f"{sequence_state_dim} != {self.config.sequence_state_dim}."
             )
         if pairwise_state_dim != self.config.pairwise_state_dim:
src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py

@@ -770,7 +770,7 @@ class GPTSanJapanesePreTrainedModel(PreTrainedModel):
         if decoder_start_token_id is None:
             raise ValueError(
-                "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id."
+                "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. "
                 "See T5 docs for more information."
             )
src/transformers/models/groupvit/modeling_tf_groupvit.py

@@ -58,7 +58,7 @@ if is_tensorflow_probability_available():
         _ = tfp.distributions.Normal(loc=0.0, scale=1.0)
     except ImportError:
         logger.error(
-            "GroupViT models are not usable since `tensorflow_probability` can't be loaded."
+            "GroupViT models are not usable since `tensorflow_probability` can't be loaded. "
             "It seems you have `tensorflow_probability` installed with the wrong tensorflow version."
             "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability."
         )
src/transformers/models/hubert/modeling_hubert.py

@@ -1183,7 +1183,7 @@ class HubertForCTC(HubertPreTrainedModel):
         not be updated during training.
         """
         warnings.warn(
-            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
             "Please use the equivalent `freeze_feature_encoder` method instead.",
             FutureWarning,
         )

@@ -1316,7 +1316,7 @@ class HubertForSequenceClassification(HubertPreTrainedModel):
         not be updated during training.
         """
         warnings.warn(
-            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
             "Please use the equivalent `freeze_feature_encoder` method instead.",
             FutureWarning,
         )
src/transformers/models/hubert/modeling_tf_hubert.py

@@ -1364,7 +1364,7 @@ class TFHubertForCTC(TFHubertPreTrainedModel):
         not be updated during training.
         """
         warnings.warn(
-            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
+            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
             "Please use the equivalent `freeze_feature_encoder` method instead.",
             FutureWarning,
         )
src/transformers/models/idefics/vision.py

@@ -115,8 +115,8 @@ class IdeficsVisionEmbeddings(nn.Module):
         fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
         if fp32_upcasting:
             logger.warning_once(
-                "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate"
-                "is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead"
+                "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate "
+                "is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead."
             )
             patch_pos_embed = patch_pos_embed.to(torch.float)
         patch_pos_embed = nn.functional.interpolate(
src/transformers/models/longt5/configuration_longt5.py

@@ -135,7 +135,7 @@ class LongT5Config(PretrainedConfig):
         if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
             raise ValueError(
-                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
+                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                 "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                 "'gated-gelu' or 'relu'"
             )
src/transformers/models/longt5/modeling_longt5.py

@@ -1352,7 +1352,7 @@ class LongT5PreTrainedModel(PreTrainedModel):
         if decoder_start_token_id is None:
             raise ValueError(
-                "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id."
+                "self.model.config.decoder_start_token_id has to be defined. In LongT5 it is usually set to the pad_token_id. "
                 "See LongT5 docs for more information."
             )
src/transformers/models/mt5/configuration_mt5.py

@@ -129,7 +129,7 @@ class MT5Config(PretrainedConfig):
         if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
             raise ValueError(
-                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
+                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                 "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                 "'gated-gelu' or 'relu'"
             )
src/transformers/models/mt5/modeling_mt5.py

@@ -855,7 +855,7 @@ class MT5PreTrainedModel(PreTrainedModel):
         if decoder_start_token_id is None:
             raise ValueError(
-                "self.model.config.decoder_start_token_id has to be defined. In MT5 it is usually set to the pad_token_id."
+                "self.model.config.decoder_start_token_id has to be defined. In MT5 it is usually set to the pad_token_id. "
                 "See MT5 docs for more information."
             )
src/transformers/models/musicgen/modeling_musicgen.py

@@ -1428,7 +1428,7 @@ class MusicgenForCausalLM(MusicgenPreTrainedModel):
         else:
             raise ValueError(
-                "Got incompatible mode for generation, should be one of greedy or sampling."
+                "Got incompatible mode for generation, should be one of greedy or sampling. "
                 "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
             )

@@ -1453,7 +1453,7 @@ class MusicgenForCausalLM(MusicgenPreTrainedModel):
 @add_start_docstrings(
-    "The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder,"
+    "The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder, "
     "for music generation tasks with one or both of text and audio prompts.",
     MUSICGEN_START_DOCSTRING,
 )

@@ -2475,7 +2475,7 @@ class MusicgenForConditionalGeneration(PreTrainedModel):
         else:
             raise ValueError(
-                "Got incompatible mode for generation, should be one of greedy or sampling."
+                "Got incompatible mode for generation, should be one of greedy or sampling. "
                 "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
             )
src/transformers/models/oneformer/convert_to_hf_oneformer.py

@@ -1118,7 +1118,7 @@ if __name__ == "__main__":
         required=True,
         type=Path,
         help=(
-            "A path to OneFormer's original implementation directory. You can download from here:"
+            "A path to OneFormer's original implementation directory. You can download from here: "
             "https://github.com/SHI-Labs/OneFormer"
         ),
     )
src/transformers/models/pix2struct/modeling_pix2struct.py

@@ -481,7 +481,7 @@ class Pix2StructPreTrainedModel(PreTrainedModel):
         if decoder_start_token_id is None:
             raise ValueError(
-                "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id."
+                "self.model.config.decoder_start_token_id has to be defined. In Pix2Struct it is usually set to the pad_token_id. "
                 "See Pix2Struct docs for more information."
             )