"tests/models/superpoint/__init__.py" did not exist on "31c23bd5ee26425a67f92fc170789656379252a6"
Commit 9fea71b4 authored by Sylvain Gugger, committed by GitHub

Fix last instances of kbit -> quantized (#23797)

parent 38dbbc26
@@ -2237,7 +2237,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
             # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
             logger.info(
                 f"Overriding torch_dtype={torch_dtype} with `torch_dtype=torch.float16` due to "
-                "requirements of `bitsandbytes` to enable model loading in mixed kbit. "
+                "requirements of `bitsandbytes` to enable model loading in 8-bit or 4-bit. "
                 "Pass your own torch_dtype to specify the dtype of the remaining non-linear layers or pass"
                 " torch_dtype=torch.float16 to remove this warning."
             )
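For context, a minimal sketch of how this warning surfaces from the user side. The checkpoint name is illustrative, and `load_in_8bit=True` assumes `bitsandbytes` is installed:

```python
import torch
from transformers import AutoModelForCausalLM

# No torch_dtype passed: transformers logs the warning above and forces
# the non-quantized layers to float16.
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)

# Passing torch_dtype explicitly silences the warning.
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m", load_in_8bit=True, torch_dtype=torch.float16
)
```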
@@ -2683,7 +2683,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
             )
             # training in 8-bit is only available in 0.37.0+
-            model._is_kbit_training_enabled = version.parse(
+            model._is_quantized_training_enabled = version.parse(
                 importlib_metadata.version("bitsandbytes")
             ) >= version.parse("0.37.0")
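Stated standalone, the version gate amounts to the following (a sketch using the stdlib `importlib.metadata` rather than transformers' internal import):

```python
# Quantized (8-bit) training is only flagged as supported when the
# installed bitsandbytes is at least 0.37.0.
import importlib.metadata as importlib_metadata

from packaging import version

is_quantized_training_enabled = version.parse(
    importlib_metadata.version("bitsandbytes")
) >= version.parse("0.37.0")
```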
@@ -403,8 +403,8 @@ class Trainer:
         )
         # At this stage the model is already loaded
-        if getattr(model, "is_loaded_in_kbit", False):
-            if getattr(model, "_is_kbit_training_enabled", False):
+        if getattr(model, "is_quantized", False):
+            if getattr(model, "_is_quantized_training_enabled", False):
                 logger.info(
                     "The model is loaded in 8-bit precision. To train this model you need to add additional modules"
                     " inside the model such as adapters using `peft` library and freeze the model weights. Please"
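A hedged sketch of what this Trainer message asks for: attach PEFT adapters to the quantized base model so only the adapter weights train while the quantized base stays frozen. The checkpoint name, LoRA hyperparameters, and target module names below are illustrative and depend on the architecture:

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Load a quantized base model (assumes bitsandbytes is installed).
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],  # illustrative; varies per model
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)

model = get_peft_model(base_model, lora_config)  # base weights remain frozen
model.print_trainable_parameters()  # only the LoRA adapter weights are trainable
```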