Unverified Commit f03ca0f9 authored by Vasilis Vryniotis, committed by GitHub

Switch torch.quantization to torch.ao.quantization (#5296)

parent 693a4632
@@ -43,14 +43,14 @@ def _mobilenet_v3_model(
     if quantize:
         model.fuse_model()
-        model.qconfig = torch.quantization.get_default_qat_qconfig(backend)
-        torch.quantization.prepare_qat(model, inplace=True)
+        model.qconfig = torch.ao.quantization.get_default_qat_qconfig(backend)
+        torch.ao.quantization.prepare_qat(model, inplace=True)
 
     if weights is not None:
         model.load_state_dict(weights.get_state_dict(progress=progress))
 
     if quantize:
-        torch.quantization.convert(model, inplace=True)
+        torch.ao.quantization.convert(model, inplace=True)
         model.eval()
 
     return model
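For context, the `torch.ao.quantization` namespace exposes the same eager-mode QAT entry points used above (`get_default_qat_qconfig`, `prepare_qat`, `convert`), so the change is a rename rather than a behavioral one. Below is a minimal sketch of that flow on a toy module; `ToyModel`, the stub placement, and the `"fbgemm"` backend choice are illustrative assumptions, not code from this repository.

```python
# Minimal sketch of the eager-mode QAT flow under the torch.ao namespace.
# ToyModel is a hypothetical stand-in for torchvision's QuantizableMobileNetV3.
import torch
import torch.nn as nn
import torch.ao.quantization as tq


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = tq.QuantStub()      # marks where tensors enter the quantized region
        self.conv = nn.Conv2d(3, 8, 3)
        self.relu = nn.ReLU()
        self.dequant = tq.DeQuantStub()  # marks where tensors leave the quantized region

    def forward(self, x):
        x = self.quant(x)
        x = self.relu(self.conv(x))
        return self.dequant(x)


model = ToyModel()
model.train()

# Same three steps as in _mobilenet_v3_model, now under torch.ao.quantization:
model.qconfig = tq.get_default_qat_qconfig("fbgemm")  # backend: "fbgemm" or "qnnpack"
tq.prepare_qat(model, inplace=True)                    # insert fake-quant observers

# ... fine-tune the model here (QAT training loop) ...

model.eval()
tq.convert(model, inplace=True)                        # swap in real quantized modules
```

`convert` is what replaces the fake-quantized modules with actual quantized kernels, which is why the helper in the diff only calls it when `quantize=True`.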