Unverified commit 12b6880c, authored by Sai-Suraj-27 and committed by GitHub

fix: Raise `TypeError` instead of `ValueError` for invalid types (#32111)

* Raised TypeError instead of ValueError for invalid types.

* Updated formatting using ruff.

* Reverted a few changes.

* Reverted a few changes.

* Updated tests accordingly.
parent d1ec36b9
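
The pattern applied throughout the hunks below follows the usual Python convention: when an argument has the wrong type, an `isinstance` check should raise `TypeError`; `ValueError` remains reserved for arguments of the right type but an unacceptable value. A minimal sketch of that convention, using a hypothetical `to_float` helper (the body is illustrative and not taken verbatim from the diff):

```python
def to_float(value):
    """Coerce `value` to a float, distinguishing type errors from value errors."""
    if isinstance(value, bool):
        # bool is a subclass of int; reject it explicitly (illustrative choice).
        raise TypeError(f"Expected a number or numeric string, got bool: {value!r}")
    if isinstance(value, (int, float)):
        return float(value)
    if not isinstance(value, str):
        # Wrong argument *type* -> TypeError (the change this commit applies).
        raise TypeError(f"Expected a number or numeric string, got {type(value).__name__}")
    try:
        return float(value)
    except ValueError:
        # Right type, unparsable *value* -> ValueError stays appropriate.
        raise ValueError(f"Could not parse {value!r} as a float")
```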
@@ -59,7 +59,7 @@ class GroupedBatchSampler(BatchSampler):
     def __init__(self, sampler, group_ids, batch_size):
         if not isinstance(sampler, Sampler):
-            raise ValueError(
+            raise TypeError(
                 "sampler should be an instance of torch.utils.data.Sampler, but got sampler={}".format(sampler)
             )
         self.sampler = sampler
......
@@ -48,7 +48,7 @@ def convert_to_float(value):
     if isinstance(value, int):
         return float(value)
     if not isinstance(value, str):
-        raise ValueError("Argument value is not a string. Can't parse it as float")
+        raise TypeError("Argument value is not a string. Can't parse it as float")
     sanitized = value
     try:
@@ -158,7 +158,7 @@ def _respect_conditions(table, row, conditions):
         cmp_value = _normalize_for_match(cmp_value)
         if not isinstance(table_value, type(cmp_value)):
-            raise ValueError("Type difference {} != {}".format(type(table_value), type(cmp_value)))
+            raise TypeError("Type difference {} != {}".format(type(table_value), type(cmp_value)))
         if not _compare(cond.operator, table_value, cmp_value):
             return False
......
@@ -107,7 +107,7 @@ class AgentImage(AgentType, ImageType):
         elif isinstance(value, np.ndarray):
             self._tensor = torch.tensor(value)
         else:
-            raise ValueError(f"Unsupported type for {self.__class__.__name__}: {type(value)}")
+            raise TypeError(f"Unsupported type for {self.__class__.__name__}: {type(value)}")
     def _ipython_display_(self, include=None, exclude=None):
         """
......
@@ -1004,7 +1004,7 @@ class PretrainedConfig(PushToHubMixin):
             elif isinstance(old_v, float):
                 v = float(v)
             elif not isinstance(old_v, str):
-                raise ValueError(
+                raise TypeError(
                     f"You can only update int, float, bool or string values in the config, got {v} for key {k}"
                 )
......
@@ -47,11 +47,11 @@ class XnliProcessor(DataProcessor):
             text_b = line[1]
             label = "contradiction" if line[2] == "contradictory" else line[2]
             if not isinstance(text_a, str):
-                raise ValueError(f"Training input {text_a} is not a string")
+                raise TypeError(f"Training input {text_a} is not a string")
             if not isinstance(text_b, str):
-                raise ValueError(f"Training input {text_b} is not a string")
+                raise TypeError(f"Training input {text_b} is not a string")
             if not isinstance(label, str):
-                raise ValueError(f"Training label {label} is not a string")
+                raise TypeError(f"Training label {label} is not a string")
             examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
         return examples
@@ -70,11 +70,11 @@ class XnliProcessor(DataProcessor):
             text_b = line[7]
             label = line[1]
             if not isinstance(text_a, str):
-                raise ValueError(f"Training input {text_a} is not a string")
+                raise TypeError(f"Training input {text_a} is not a string")
             if not isinstance(text_b, str):
-                raise ValueError(f"Training input {text_b} is not a string")
+                raise TypeError(f"Training input {text_b} is not a string")
             if not isinstance(label, str):
-                raise ValueError(f"Training label {label} is not a string")
+                raise TypeError(f"Training label {label} is not a string")
             examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
         return examples
......
@@ -156,7 +156,7 @@ class PhrasalConstraint(Constraint):
     def does_advance(self, token_id: int):
         if not isinstance(token_id, int):
-            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+            raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
         if self.completed:
             return False
@@ -165,7 +165,7 @@ class PhrasalConstraint(Constraint):
     def update(self, token_id: int):
         if not isinstance(token_id, int):
-            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
+            raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
         stepped = False
         completed = False
@@ -300,7 +300,7 @@ class DisjunctiveConstraint(Constraint):
     def does_advance(self, token_id: int):
         if not isinstance(token_id, int):
-            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
+            raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
         next_tokens = self.trie.next_tokens(self.current_seq)
@@ -308,7 +308,7 @@ class DisjunctiveConstraint(Constraint):
     def update(self, token_id: int):
         if not isinstance(token_id, int):
-            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
+            raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
         stepped = False
         completed = False
@@ -432,7 +432,7 @@ class ConstraintListState:
     def add(self, token_id: int):
         if not isinstance(token_id, int):
-            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
+            raise TypeError(f"`token_id` should be an `int`, but is `{token_id}`.")
         complete, stepped = False, False
......
@@ -4281,7 +4281,7 @@ def _split(data, full_batch_size: int, split_size: int = None):
             for i in range(0, full_batch_size, split_size)
         ]
     else:
-        raise ValueError(f"Unexpected attribute type: {type(data)}")
+        raise TypeError(f"Unexpected attribute type: {type(data)}")
 def _split_model_inputs(
@@ -4388,7 +4388,7 @@ def stack_model_outputs(model_outputs: List[ModelOutput]) -> ModelOutput:
             # If the elements are integers or floats, return a tensor
             return torch.tensor(data)
         else:
-            raise ValueError(f"Unexpected attribute type: {type(data[0])}")
+            raise TypeError(f"Unexpected attribute type: {type(data[0])}")
     # Use a dictionary comprehension to gather attributes from all objects and concatenate them
     concatenated_data = {
......
@@ -544,7 +544,7 @@ class ImageProcessingMixin(PushToHubMixin):
             response.raise_for_status()
             return Image.open(BytesIO(response.content))
         else:
-            raise ValueError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}")
+            raise TypeError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}")
 ImageProcessingMixin.push_to_hub = copy_func(ImageProcessingMixin.push_to_hub)
......
@@ -75,7 +75,7 @@ def to_channel_dimension_format(
         `np.ndarray`: The image with the channel dimension set to `channel_dim`.
     """
     if not isinstance(image, np.ndarray):
-        raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
+        raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}")
     if input_channel_dim is None:
         input_channel_dim = infer_channel_dimension_format(image)
@@ -121,7 +121,7 @@ def rescale(
         `np.ndarray`: The rescaled image.
     """
     if not isinstance(image, np.ndarray):
-        raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
+        raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}")
     rescaled_image = image * scale
     if data_format is not None:
@@ -453,7 +453,7 @@ def center_crop(
     return_numpy = True if return_numpy is None else return_numpy
     if not isinstance(image, np.ndarray):
-        raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")
+        raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}")
     if not isinstance(size, Iterable) or len(size) != 2:
         raise ValueError("size must have 2 elements representing the height and width of the output image")
......
@@ -377,7 +377,7 @@ def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] =
     elif isinstance(image, PIL.Image.Image):
         image = image
     else:
-        raise ValueError(
+        raise TypeError(
             "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image."
         )
     image = PIL.ImageOps.exif_transpose(image)
......
@@ -199,7 +199,7 @@ def get_modules_to_fuse(model, quantization_config):
             The quantization configuration to use.
     """
     if not isinstance(model, PreTrainedModel):
-        raise ValueError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}")
+        raise TypeError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}")
     # Always default to `quantization_config.modules_to_fuse`
     if quantization_config.modules_to_fuse is not None:
......
@@ -262,9 +262,7 @@ class PeftAdapterMixin:
             raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")
         if not isinstance(adapter_config, PeftConfig):
-            raise ValueError(
-                f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead."
-            )
+            raise TypeError(f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.")
         # Retrieve the name or path of the model, one could also use self.config._name_or_path
         # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100
......
@@ -1209,7 +1209,7 @@ class TFPreTrainedModel(keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushT
     def __init__(self, config, *inputs, **kwargs):
         super().__init__(*inputs, **kwargs)
         if not isinstance(config, PretrainedConfig):
-            raise ValueError(
+            raise TypeError(
                 f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                 "`PretrainedConfig`. To create a model from a pretrained model use "
                 f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
......
@@ -1418,13 +1418,13 @@ class AlignModel(AlignPreTrainedModel):
         super().__init__(config)
         if not isinstance(config.text_config, AlignTextConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.text_config is expected to be of type AlignTextConfig but is of type"
                 f" {type(config.text_config)}."
             )
         if not isinstance(config.vision_config, AlignVisionConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.vision_config is expected to be of type AlignVisionConfig but is of type"
                 f" {type(config.vision_config)}."
             )
......
@@ -1466,12 +1466,12 @@ class AltCLIPModel(AltCLIPPreTrainedModel):
         super().__init__(config)
         if not isinstance(config.vision_config, AltCLIPVisionConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.vision_config is expected to be of type AltCLIPVisionConfig but is of type"
                 f" {type(config.vision_config)}."
             )
         if not isinstance(config.text_config, AltCLIPTextConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.text_config is expected to be of type AltCLIPTextConfig but is of type"
                 f" {type(config.text_config)}."
             )
......
@@ -211,7 +211,7 @@ class BarkProcessor(ProcessorMixin):
                 raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
             if not isinstance(voice_preset[key], np.ndarray):
-                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
+                raise TypeError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
             if len(voice_preset[key].shape) != self.preset_shape[key]:
                 raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
......
@@ -755,13 +755,13 @@ class BlipModel(BlipPreTrainedModel):
         super().__init__(config)
         if not isinstance(config.text_config, BlipTextConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.text_config is expected to be of type BlipTextConfig but is of type"
                 f" {type(config.text_config)}."
             )
         if not isinstance(config.vision_config, BlipVisionConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.vision_config is expected to be of type BlipVisionConfig but is of type"
                 f" {type(config.vision_config)}."
             )
......
@@ -794,13 +794,13 @@ class TFBlipMainLayer(keras.layers.Layer):
         super().__init__(*args, **kwargs)
         if not isinstance(config.text_config, BlipTextConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.text_config is expected to be of type BlipTextConfig but is of type"
                 f" {type(config.text_config)}."
             )
         if not isinstance(config.vision_config, BlipVisionConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.vision_config is expected to be of type BlipVisionConfig but is of type"
                 f" {type(config.vision_config)}."
             )
......
@@ -113,7 +113,7 @@ class ChameleonProcessor(ProcessorMixin):
         if isinstance(text, str):
             text = [text]
         elif not isinstance(text, list) and not isinstance(text[0], str):
-            raise ValueError("Invalid input text. Please provide a string, or a list of strings")
+            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
         # Replace the image token with the expanded image token sequence
         prompt_strings = []
......
@@ -1341,13 +1341,13 @@ class ChineseCLIPModel(ChineseCLIPPreTrainedModel):
         super().__init__(config)
         if not isinstance(config.text_config, ChineseCLIPTextConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type"
                 f" {type(config.text_config)}."
             )
         if not isinstance(config.vision_config, ChineseCLIPVisionConfig):
-            raise ValueError(
+            raise TypeError(
                 "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type"
                 f" {type(config.vision_config)}."
             )
......
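
The commit message bullet "Updated tests accordingly" refers to test changes not shown on this page. A hedged sketch of what such an update typically looks like with pytest, exercising `DisjunctiveConstraint.update` from the hunks above (the test name and arguments are illustrative, not the repository's actual test):

```python
import pytest

from transformers.generation.beam_constraints import DisjunctiveConstraint


def test_update_rejects_non_int_token_id():
    constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
    # The expectation moves from ValueError to TypeError now that the
    # isinstance check raises TypeError for a non-int token_id.
    with pytest.raises(TypeError):
        constraint.update("not-an-int")
```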