Unverified commit 9841a907, authored by Vasilis Vryniotis, committed by GitHub

Better model checkpoints for various classification models (#4900)

* Add new weights for MobileNetV3 Large.

* Update weights for ResNet101.

* Update weights for ResNet152.

* Update numbers of ResNet101 and ResNet152 with batch size 1.

* Add new weights for ResNeXt101_32x8d.

* Add batch size 1 stats for ResNeXt101_32x8d.

* Update weights for MobileNetV3 Large.

* Update weights for ResNeXt50.

* Fix merge issues.

* Update the URLs to indicate the recipe config.
parent 983d27ed
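
The diff adds each improved checkpoint as a new ImageNet1K_RefV2 entry on the model's weights enum rather than replacing the existing weights, so users opt in explicitly. As a rough sketch of how such an entry would be consumed, assuming the prototype multi-weight API of this period (torchvision.prototype.models, with enum members proxying the fields of their WeightEntry); the entry name, its eval preset, and the accuracy figure come from the diff below, everything else is illustrative:

import torch
from torchvision.prototype.models import resnet101, ResNet101Weights

# Opt in to the checkpoint trained with the new recipe.
weights = ResNet101Weights.ImageNet1K_RefV2
print(weights.meta["acc@1"])  # 81.886 per the metadata below

model = resnet101(weights=weights)
model.eval()

# Each WeightEntry bundles its own eval preset (here ImageNetEval with
# crop_size=224, resize_size=232), so preprocessing matches what the
# checkpoint was validated with.
preprocess = weights.transforms()

batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)  # stand-in image
with torch.no_grad():
    prediction = model(batch).argmax(dim=1)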
@@ -41,7 +41,6 @@ _COMMON_META = {
     "size": (224, 224),
     "categories": _IMAGENET_CATEGORIES,
     "interpolation": InterpolationMode.BILINEAR,
-    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
 }
@@ -51,10 +50,21 @@ class MobileNetV3LargeWeights(Weights):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
             "acc@1": 74.042,
             "acc@5": 91.340,
         },
     )
+    ImageNet1K_RefV2 = WeightEntry(
+        url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
+        transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
+        meta={
+            **_COMMON_META,
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
+            "acc@1": 75.274,
+            "acc@5": 92.566,
+        },
+    )


 class MobileNetV3SmallWeights(Weights):
@@ -63,6 +73,7 @@ class MobileNetV3SmallWeights(Weights):
         transforms=partial(ImageNetEval, crop_size=224),
         meta={
             **_COMMON_META,
+            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
             "acc@1": 67.668,
             "acc@5": 87.402,
         },
...
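
All of the new RefV2 entries evaluate with resize_size=232 before the 224 center crop, rather than the classic 256, per the tuned recipes linked in issue #3995. As a sketch of roughly what the ImageNetEval preset amounts to in plain torchvision transforms (the exact composition and the ImageNet normalization statistics here are assumptions based on the standard pipeline, not the preset's actual source):

from torchvision import transforms

def imagenet_eval(crop_size=224, resize_size=232):
    # Approximate stand-in for the prototype ImageNetEval preset: resize the
    # shorter side to resize_size, center-crop to crop_size, then normalize
    # with the usual ImageNet statistics.
    return transforms.Compose([
        transforms.Resize(resize_size),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])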
@@ -96,7 +96,7 @@ class ResNet50Weights(Weights):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
-            "recipe": "https://github.com/pytorch/vision/issues/3995",
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 80.674,
             "acc@5": 95.166,
         },
@@ -115,13 +115,13 @@ class ResNet101Weights(Weights):
         },
     )
     ImageNet1K_RefV2 = WeightEntry(
-        url="https://download.pytorch.org/models/resnet101-b641f3a9.pth",
+        url="https://download.pytorch.org/models/resnet101-cd907fc2.pth",
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
-            "recipe": "https://github.com/pytorch/vision/issues/3995",
-            "acc@1": 81.728,
-            "acc@5": 95.670,
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
+            "acc@1": 81.886,
+            "acc@5": 95.780,
         },
     )
@@ -138,13 +138,13 @@ class ResNet152Weights(Weights):
         },
     )
     ImageNet1K_RefV2 = WeightEntry(
-        url="https://download.pytorch.org/models/resnet152-089c0848.pth",
+        url="https://download.pytorch.org/models/resnet152-f82ba261.pth",
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
-            "recipe": "https://github.com/pytorch/vision/issues/3995",
-            "acc@1": 82.042,
-            "acc@5": 95.926,
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
+            "acc@1": 82.284,
+            "acc@5": 96.002,
         },
     )
@@ -161,13 +161,13 @@ class ResNeXt50_32x4dWeights(Weights):
         },
     )
     ImageNet1K_RefV2 = WeightEntry(
-        url="https://download.pytorch.org/models/resnext50_32x4d-b260af35.pth",
+        url="https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth",
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
-            "recipe": "https://github.com/pytorch/vision/issues/3995",
-            "acc@1": 81.116,
-            "acc@5": 95.478,
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe",
+            "acc@1": 81.198,
+            "acc@5": 95.340,
         },
     )
@@ -183,6 +183,16 @@ class ResNeXt101_32x8dWeights(Weights):
             "acc@5": 94.526,
         },
     )
+    ImageNet1K_RefV2 = WeightEntry(
+        url="https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth",
+        transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
+        meta={
+            **_COMMON_META,
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
+            "acc@1": 82.834,
+            "acc@5": 96.228,
+        },
+    )


 class WideResNet50_2Weights(Weights):
@@ -201,7 +211,7 @@ class WideResNet50_2Weights(Weights):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
-            "recipe": "https://github.com/pytorch/vision/issues/3995",
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 81.602,
             "acc@5": 95.758,
         },
@@ -224,7 +234,7 @@ class WideResNet101_2Weights(Weights):
         transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
         meta={
             **_COMMON_META,
-            "recipe": "https://github.com/pytorch/vision/issues/3995",
+            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-fixres",
             "acc@1": 82.492,
             "acc@5": 96.110,
         },
...
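
The acc@1/acc@5 values in the meta dicts are top-1/top-5 accuracies on the ImageNet-1K validation set, and the commit message notes that the ResNet101, ResNet152, and ResNeXt101_32x8d numbers were re-measured with batch size 1. A sketch of such a measurement loop; the dataset wiring is a placeholder, only the metric definitions follow the meta keys:

import torch

@torch.no_grad()
def topk_accuracy(model, loader, device="cpu", ks=(1, 5)):
    # Top-k accuracy over a validation loader; running it with batch_size=1
    # mirrors the measurement protocol mentioned in the commit message.
    model.eval().to(device)
    correct = {k: 0 for k in ks}
    total = 0
    for images, targets in loader:
        images, targets = images.to(device), targets.to(device)
        preds = model(images).topk(max(ks), dim=1).indices
        for k in ks:
            correct[k] += (preds[:, :k] == targets.unsqueeze(1)).any(dim=1).sum().item()
        total += targets.size(0)
    return {f"acc@{k}": 100.0 * correct[k] / total for k in ks}

# Illustrative wiring; the path and `preprocess` are placeholders:
# val = torchvision.datasets.ImageNet("path/to/imagenet", split="val", transform=preprocess)
# print(topk_accuracy(model, torch.utils.data.DataLoader(val, batch_size=1)))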