Unverified commit 864c8e6e authored by Younes Belkada, committed by GitHub
Browse files

[`Awq`] Add peft support for AWQ (#28987)



* add peft support for AWQ

* Update src/transformers/quantizers/quantizer_awq.py
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* fix

---------
Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
parent ce4fff0b
...@@ -11,8 +11,11 @@ ...@@ -11,8 +11,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import importlib.metadata
from typing import TYPE_CHECKING from typing import TYPE_CHECKING
from packaging import version
from .base import HfQuantizer from .base import HfQuantizer
...@@ -105,6 +108,6 @@ class AwqQuantizer(HfQuantizer): ...@@ -105,6 +108,6 @@ class AwqQuantizer(HfQuantizer):
@property
def is_trainable(self):
    """Whether AWQ-quantized models support fine-tuning (PEFT).

    AWQ supports PEFT fine-tuning starting from autoawq 0.2.0; earlier
    versions support neither QAT nor PEFT.

    Returns:
        `bool`: `True` when the installed `autoawq` package is at least
        version 0.2.0, `False` otherwise — including when `autoawq` is
        not installed at all, in which case training is impossible rather
        than an error.
    """
    MIN_AWQ_VERSION_FOR_PEFT = "0.2.0"
    try:
        awq_version = importlib.metadata.version("autoawq")
    except importlib.metadata.PackageNotFoundError:
        # A capability query should answer, not crash: without autoawq
        # installed there is nothing trainable.
        return False
    return version.parse(awq_version) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment