chenpangpang / transformers

Commit f9f1f2ac (Unverified)
[`HFQuantizer`] Remove `check_packages_compatibility` logic (#28789)

remove `check_packages_compatibility` logic

Authored by Younes Belkada on Jan 31, 2024; committed via GitHub on Jan 31, 2024
Parent: ae0c27ad
Showing 1 changed file with 0 additions and 22 deletions.

src/transformers/quantizers/base.py (+0 / -22) @ f9f1f2ac
@@ -15,7 +15,6 @@ from abc import ABC, abstractmethod
 from typing import TYPE_CHECKING, Any, Dict, Optional, Union
 
 from ..utils import is_torch_available
-from ..utils.import_utils import _is_package_available
 from ..utils.quantization_config import QuantizationConfigMixin
 
 
@@ -64,8 +63,6 @@ class HfQuantizer(ABC):
                 f"pass `pre_quantized=True` while knowing what you are doing."
             )
 
-        self.check_packages_compatibility()
-
     def update_torch_dtype(self, torch_dtype: "torch.dtype") -> "torch.dtype":
         """
         Some quantization methods require to explicitly set the dtype of the model to a
@@ -152,25 +149,6 @@ class HfQuantizer(ABC):
         """
         return
 
-    def check_packages_compatibility(self):
-        """
-        Check the compatibility of the quantizer with respect to the current environment. Loops over all packages
-        name under `self.required_packages` and checks if that package is available.
-        """
-        if self.required_packages is not None:
-            non_available_packages = []
-            for package_name in self.required_packages:
-                is_package_available = _is_package_available(package_name)
-                if not is_package_available:
-                    non_available_packages.append(package_name)
-
-            if len(non_available_packages) > 0:
-                raise ValueError(
-                    f"The packages {self.required_packages} are required to use {self.__class__.__name__}"
-                    f" the following packages are missing in your environment: {non_available_packages}, please make sure"
-                    f" to install them in order to use the quantizer."
-                )
-
     def preprocess_model(self, model: "PreTrainedModel", **kwargs):
         """
         Setting model attributes and/or converting model before weights loading. At this point
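For context, here is a minimal sketch (not part of this commit) of how the same guard can be reproduced after the removal: a quantizer that depends on extra packages checks for them itself, typically from its own `validate_environment` override, using the same `_is_package_available` helper the deleted method relied on. The function name `ensure_packages_available` and the example package list below are hypothetical, used only for illustration.

```python
# Minimal sketch, assuming transformers is installed.
# `ensure_packages_available` is a hypothetical helper, not part of the library.
from transformers.utils.import_utils import _is_package_available


def ensure_packages_available(required_packages, quantizer_name="HfQuantizer"):
    """Raise if any package a quantizer depends on is not importable."""
    missing = [name for name in (required_packages or []) if not _is_package_available(name)]
    if missing:
        raise ValueError(
            f"The packages {required_packages} are required to use {quantizer_name}, "
            f"but the following are missing from your environment: {missing}. "
            f"Please install them to use the quantizer."
        )


# Example call; the package list is illustrative only.
ensure_packages_available(["bitsandbytes", "accelerate"], quantizer_name="Bnb4BitHfQuantizer")
```

After this commit, the base class no longer loops over `required_packages` in `__init__`; each concrete quantizer is expected to perform dependency checks of this kind in its own environment-validation step instead.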