Commit 72781eba authored Mar 13, 2017 by Sam Gross, committed by Soumith Chintala on Mar 13, 2017
Add Inception v3 model ported from TensorFlow (#101)
parent 429dbebe
Showing 2 changed files with 320 additions and 0 deletions:

torchvision/models/__init__.py   +2   -0
torchvision/models/inception.py  +318 -0
torchvision/models/__init__.py
@@ -35,6 +35,7 @@ ResNet-34 26.70 8.58
 ResNet-50 23.85 7.13
 ResNet-101 22.63 6.44
 ResNet-152 21.69 5.94
+Inception v3 22.55 6.44
 AlexNet 43.45 20.91
 VGG-11 32.14 12.12
 VGG-13 31.04 11.40
@@ -55,3 +56,4 @@ from .alexnet import *
 from .resnet import *
 from .vgg import *
 from .squeezenet import *
+from .inception import *
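With `from .inception import *` added to torchvision/models/__init__.py and 'inception_v3' listed in the new module's __all__, the factory function becomes reachable as torchvision.models.inception_v3. The following is a minimal usage sketch, not part of the diff: it assumes a torchvision built from this commit and a recent PyTorch where a plain tensor can be passed to the model (on the 2017 API the input would be wrapped in torch.autograd.Variable); the input batch is a hypothetical example.

import torch
from torchvision import models

# pretrained=True downloads the TensorFlow-ported weights listed in model_urls
# via torch.utils.model_zoo and sets transform_input=True automatically.
model = models.inception_v3(pretrained=True)
model.eval()

# Inception v3 expects 299 x 299 inputs (see the shape comments in forward()).
batch = torch.randn(1, 3, 299, 299)   # hypothetical input batch
logits = model(batch)                 # shape (1, 1000) in eval mode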
torchvision/models/inception.py
new file mode 100644
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo


__all__ = ['Inception3', 'inception_v3']


model_urls = {
    # Inception v3 ported from TensorFlow
    'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}


def inception_v3(pretrained=False, **kwargs):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
        model = Inception3(**kwargs)
        model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
        return model

    return Inception3(**kwargs)
class Inception3(nn.Module):

    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, x):
        if self.transform_input:
            x = x.clone()
            x[0] = x[0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[1] = x[1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[2] = x[2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        if self.training and self.aux_logits:
            aux = self.AuxLogits(x)
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        x = x.view(x.size(0), -1)
        # 2048
        x = self.fc(x)
        # 1000 (num_classes)
        if self.training and self.aux_logits:
            return x, aux
        return x
class InceptionA(nn.Module):

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)

        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)

        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
class InceptionB(nn.Module):

    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
class InceptionC(nn.Module):

    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))

        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)
class InceptionD(nn.Module):

    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)

        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)
class InceptionE(nn.Module):

    def __init__(self, in_channels):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)

        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
class InceptionAux(nn.Module):

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001

    def forward(self, x):
        # 17 x 17 x 768
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        # 5 x 5 x 768
        x = self.conv0(x)
        # 5 x 5 x 128
        x = self.conv1(x)
        # 1 x 1 x 768
        x = x.view(x.size(0), -1)
        # 768
        x = self.fc(x)
        # 1000
        return x
class BasicConv2d(nn.Module):

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)
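Two behaviors of the ported model are worth noting. The constants in the transform_input branch of forward() are the usual ImageNet per-channel statistics (mean 0.485/0.456/0.406, std 0.229/0.224/0.225): for a pixel value p in [0, 1] that was normalized with those statistics, v * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 works out to (p - 0.5) / 0.5, the scaling the TensorFlow-trained weights expect. Separately, when the module is in training mode with aux_logits=True, forward() returns a (main, aux) pair; in eval mode only the main logits come back. Below is a small sketch of that behavior, not part of the commit, assuming a recent PyTorch and scipy installed (the truncated-normal initialization in __init__ imports scipy.stats).

import torch

# Randomly initialized model with the default auxiliary classifier enabled.
model = Inception3(num_classes=1000, aux_logits=True)

# Hypothetical batch; 299 x 299 keeps the final 8 x 8 average pool valid.
x = torch.randn(2, 3, 299, 299)

model.train()
main_out, aux_out = model(x)   # both have shape (2, 1000)

model.eval()
logits = model(x)              # single tensor of shape (2, 1000); the aux head is skipped

A typical training setup sums the two cross-entropy losses with a down-weighted auxiliary term (0.4 is the customary weight from the Inception papers), though no training code is part of this commit.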