Unverified commit ba36b552, authored by Lianmin Zheng and committed by GitHub

Revert "Small fixes for torchao quant" (#2493)

parent 9cd9dc83
@@ -26,12 +26,11 @@ def apply_torchao_config_to_model(
         quantize_,
     )
     from torchao.quantization.observer import PerRow, PerTensor
-    from torchao.quantization.quant_api import _is_linear
 
     if filter_fn is None:
 
         def filter_fn(module, fqn):
-            return _is_linear(module) and "proj" in fqn
+            return "proj" in fqn
 
     if torchao_config == "" or torchao_config is None:
         return model
......
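For context, the hunk above changes the default `filter_fn`: before this revert it required `_is_linear(module) and "proj" in fqn`, afterwards it matches any module whose fully qualified name contains "proj". Below is a minimal sketch, not part of the commit, that compares what the two filters select; it assumes torchao is installed and that `_is_linear` is importable from `torchao.quantization.quant_api` (as in the import removed above). `TinyBlock` is an illustrative stand-in, not sglang code.

# Minimal sketch (not part of this commit): compare which modules the pre- and
# post-revert default filters match. _is_linear is the helper whose import is
# removed above; TinyBlock is an illustrative stand-in, not sglang code.
import torch
from torchao.quantization.quant_api import _is_linear

class TinyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.q_proj = torch.nn.Linear(8, 8)     # linear, name contains "proj"
        self.proj_norm = torch.nn.LayerNorm(8)  # not linear, name contains "proj"
        self.act = torch.nn.ReLU()              # matches neither filter

block = TinyBlock()
for fqn, module in block.named_modules():
    pre_revert = _is_linear(module) and "proj" in fqn  # default filter before the revert
    post_revert = "proj" in fqn                        # default filter after the revert
    print(f"{fqn or '<root>'}: pre={pre_revert}, post={post_revert}")

On this toy model the post-revert filter also selects `proj_norm` (a LayerNorm), which the `_is_linear` check would have excluded.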
@@ -157,10 +157,6 @@ class ModelRunner:
         self.sampler = Sampler()
         self.load_model()
 
-        apply_torchao_config_to_model(
-            self.model, global_server_args_dict["torchao_config"]
-        )
-
         # Apply torch TP if the model supports it
         supports_torch_tp = getattr(self.model, "supports_torch_tp", False)
         if self.tp_size > 1 and supports_torch_tp:
@@ -169,6 +165,10 @@ class ModelRunner:
         else:
             self.torch_tp_applied = False
 
+        apply_torchao_config_to_model(
+            self.model, global_server_args_dict["torchao_config"]
+        )
+
         # Init memory pool and attention backends
         if server_args.lora_paths is not None:
             self.init_lora_manager()
......
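The two ModelRunner hunks leave the torchao call itself unchanged; only its position moves from before the torch TP block back to after it. Below is a small, self-contained sketch of that post-revert ordering using plain torch/torchao; `TinyModel` and `maybe_apply_tp` are illustrative stand-ins for the loaded model and the TP branch, not sglang APIs, and `int8_weight_only` is just one example torchao config.

# Illustrative sketch of the post-revert ordering: handle the torch TP decision
# first, then apply torchao quantization. Names below are stand-ins, not sglang code.
import torch
from torchao.quantization import int8_weight_only, quantize_

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.up_proj = torch.nn.Linear(32, 32)

    def forward(self, x):
        return self.up_proj(x)

def maybe_apply_tp(model, tp_size):
    # Stand-in for the "Apply torch TP if the model supports it" branch.
    return tp_size > 1  # nothing to shard on a single rank in this toy example

model = TinyModel()
torch_tp_applied = maybe_apply_tp(model, tp_size=1)

# Quantization runs after the TP step, mirroring the hunks above.
quantize_(model, int8_weight_only(), lambda module, fqn: "proj" in fqn)
print(torch_tp_applied, type(model.up_proj.weight))  # weight now wraps a torchao tensor subclass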