Unverified commit 12be107b authored by hx89, committed by GitHub

Update inception quantized model path (#1969)

* update model path

* remove AuxLogits before loading the quantized model
parent bbb4a9a1
@@ -20,7 +20,7 @@ __all__ = [
 quant_model_urls = {
     # fp32 weights ported from TensorFlow, quantized in PyTorch
     "inception_v3_google_fbgemm":
-        "https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-4f6e4894.pth"
+        "https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth"
 }

@@ -65,6 +65,9 @@ def inception_v3(pretrained=False, progress=True, quantize=False, **kwargs):
     if pretrained:
         if quantize:
+            if not original_aux_logits:
+                model.aux_logits = False
+                del model.AuxLogits
             model_url = quant_model_urls['inception_v3_google' + '_' + backend]
         else:
             model_url = inception_module.model_urls['inception_v3_google']

@@ -74,9 +77,10 @@ def inception_v3(pretrained=False, progress=True, quantize=False, **kwargs):
         model.load_state_dict(state_dict)

-        if not original_aux_logits:
-            model.aux_logits = False
-            del model.AuxLogits
+        if not quantize:
+            if not original_aux_logits:
+                model.aux_logits = False
+                del model.AuxLogits
     return model
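A minimal usage sketch of the behavior this commit targets, assuming a torchvision build that includes the quantized Inception model (`torchvision.models.quantization.inception_v3`) and the updated fbgemm checkpoint; the exact checkpoint contents are not shown here, so treat the comments about key mismatches as the intent described in the commit message rather than a verified reproduction.

```python
import torch
from torchvision.models.quantization import inception_v3

# With quantize=True and aux_logits=False, the AuxLogits head is now deleted
# *before* load_state_dict, so the quantized checkpoint
# (inception_v3_google_fbgemm-71447a44.pth) can be loaded without stale
# auxiliary-classifier parameters getting in the way. For the fp32 path
# (quantize=False), the head is still removed *after* loading, as before.
model = inception_v3(pretrained=True, quantize=True, aux_logits=False)
model.eval()

# Quantized inference still takes a regular float image batch; the model's
# quant/dequant stubs handle conversion internally. Inception expects 299x299.
x = torch.randn(1, 3, 299, 299)
with torch.no_grad():
    out = model(x)
print(out.shape)  # torch.Size([1, 1000])
```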