Unverified Commit f569172f authored by Younes Belkada's avatar Younes Belkada Committed by GitHub
Browse files

FIX / bnb: fix torch compatibility issue with `itemsize` (#30162)

* fix torch compatibility issues

* fix

* Update src/transformers/modeling_utils.py
parent 4f7a9f9c
...@@ -1159,9 +1159,12 @@ class ModuleUtilsMixin: ...@@ -1159,9 +1159,12 @@ class ModuleUtilsMixin:
# For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are # For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are
# used for the 4bit quantization (uint8 tensors are stored) # used for the 4bit quantization (uint8 tensors are stored)
if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit): if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit):
total_numel.append( quant_storage = self.hf_quantizer.quantization_config.bnb_4bit_quant_storage
param.numel() * 2 * self.hf_quantizer.quantization_config.bnb_4bit_quant_storage.itemsize # For compatibility with older PT version - see: https://github.com/huggingface/peft/pull/1635
nb_params = (
quant_storage.itemsize if hasattr(quant_storage, "itemsize") else quant_storage.element_size()
) )
total_numel.append(param.numel() * 2 * nb_params)
else: else:
total_numel.append(param.numel()) total_numel.append(param.numel())
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment