ModelZoo / convert_onnx / Commits / 8590ec24

Commit 8590ec24, authored May 24, 2024 by yaoht

init commit

Parent: 38a732e1

Changes: 58 · Showing 20 changed files with 10008 additions and 2 deletions (+10008 -2):
- README.md (+20 -2)
- caffe/caffe2onnx/.gitignore (+248 -0)
- caffe/caffe2onnx/README.md (+41 -0)
- caffe/caffe2onnx/caffe2onnx.py (+35 -0)
- caffe/caffe2onnx/doc/onnx-version.md (+38 -0)
- caffe/caffe2onnx/proto/__init__.py (+0 -0)
- caffe/caffe2onnx/proto/caffe_upsample.proto (+1676 -0)
- caffe/caffe2onnx/proto/caffe_upsample_pb2.py (+7340 -0)
- caffe/caffe2onnx/src/OPs/Add.py (+28 -0)
- caffe/caffe2onnx/src/OPs/Axpy.py (+77 -0)
- caffe/caffe2onnx/src/OPs/BatchNorm.py (+25 -0)
- caffe/caffe2onnx/src/OPs/Clip.py (+42 -0)
- caffe/caffe2onnx/src/OPs/Concat.py (+56 -0)
- caffe/caffe2onnx/src/OPs/Conv.py (+76 -0)
- caffe/caffe2onnx/src/OPs/ConvTranspose.py (+66 -0)
- caffe/caffe2onnx/src/OPs/Crop.py (+59 -0)
- caffe/caffe2onnx/src/OPs/DetectionOutput.py (+86 -0)
- caffe/caffe2onnx/src/OPs/Dropout.py (+23 -0)
- caffe/caffe2onnx/src/OPs/Eltwise.py (+33 -0)
- caffe/caffe2onnx/src/OPs/Flatten.py (+39 -0)
README.md

# convert_onnx

Examples of converting Caffe, PyTorch, TF, TFLite, and Paddle models to ONNX models.

## Caffe to ONNX

For converting Caffe models to ONNX, see [caffe/caffe2onnx/README.md](caffe/caffe2onnx/README.md).

## Paddle to ONNX

For converting Paddle models to ONNX, see [paddle/README.md](paddle/README.md).

## TF and TFLite to ONNX

For converting TF and TFLite models to ONNX, see [tf_tflite/README.md](tf_tflite/README.md).

## Torch to ONNX

For converting Torch models to ONNX, see [torch/README.md](torch/README.md).
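For orientation, the PyTorch path usually comes down to a single `torch.onnx.export` call; the sketch below is only illustrative (the torchvision model, tensor shape, and file name are placeholders, not taken from this repository):

```python
import torch
import torchvision

# Minimal PyTorch -> ONNX export sketch; model choice and paths are placeholders.
model = torchvision.models.resnet50().eval()
dummy_input = torch.randn(1, 3, 224, 224)  # NCHW example input
torch.onnx.export(model, dummy_input, "resnet50.onnx",
                  input_names=["input"], output_names=["output"],
                  opset_version=11)
```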
caffe/caffe2onnx/.gitignore
0 → 100644
View file @
8590ec24
### https://raw.github.com/github/gitignore/2f75277037d172200d4a37621c1b9c3b9901dbd8/Global/JetBrains.gitignore
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### https://raw.github.com/github/gitignore/2f75277037d172200d4a37621c1b9c3b9901dbd8/Python.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
### https://raw.github.com/github/gitignore/2f75277037d172200d4a37621c1b9c3b9901dbd8/Global/macOS.gitignore
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
caffe/caffe2onnx/README.md
0 → 100644
# Converting a Caffe model to an ONNX model

## 1. Environment setup

- onnx (version == 1.6.0)

```shell
pip3 install onnx==1.6.0
```

- numpy (version >= 1.17.0)

```shell
pip3 install numpy
```

## 2. Using the caffe2onnx tool

- Enter the tool directory

```shell
cd caffe/caffe2onnx/
```

- Download a model

An example Caffe model can be downloaded [here](https://github.com/GammaLab-HPC/dawnbench_inference_imagenet/tree/master/model); both the prototxt and the caffemodel files are required.

- Run the conversion tool

```shell
python3 caffe2onnx.py ./resnet50.prototxt ./resnet50.caffemodel -o ./test.onnx
```

- Script arguments

```text
usage: caffe2onnx.py [-h] [-o ONNX_FILE] proto_file caffe_model_file

convert caffe model to onnx

positional arguments:
  proto_file        the path for the prototxt file; the file name must end with .prototxt
  caffe_model_file  the path for the caffe model file; the file name must end with .caffemodel

options:
  -h, --help        show this help message and exit
  -o ONNX_FILE      the path for the generated onnx file
```
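After conversion, it may be worth sanity-checking the generated file with the onnx package installed above; a minimal sketch (the path matches the example command):

```python
import onnx

# Load and validate the converted model produced by the example command above.
model = onnx.load("./test.onnx")
onnx.checker.check_model(model)
print(onnx.helper.printable_graph(model.graph))
```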
caffe/caffe2onnx/caffe2onnx.py
0 → 100644
from src.load_save_model import LoadCaffeModel, SaveOnnxModel
from src.caffe2onnx import Caffe2Onnx
from src.args_parser import parse_args
from src.utils import is_ssd_model


def main(args):
    caffe_graph_path = args.proto_file
    caffe_params_path = args.caffe_model_file

    pos_s = caffe_graph_path.rfind("/")
    if pos_s == -1:
        pos_s = 0
    pos_dot = caffe_graph_path.rfind(".")
    onnx_name = caffe_graph_path[pos_s + 1:pos_dot]
    save_path = caffe_graph_path[0:pos_dot] + '.onnx'
    if args.onnx_file is not None:
        save_path = args.onnx_file

    graph, params = LoadCaffeModel(caffe_graph_path, caffe_params_path)
    print('2. Start model conversion')
    c2o = Caffe2Onnx(graph, params, onnx_name)
    print('3. Create the onnx model')
    onnx_model = c2o.createOnnxModel()
    print('4. Save the onnx model')
    # is_ssd = is_ssd_model(caffe_graph_path)
    # if is_ssd:
    SaveOnnxModel(onnx_model, save_path, need_polish=False)
    # else:
    #     SaveOnnxModel(onnx_model, save_path, need_polish=True)


if __name__ == '__main__':
    args = parse_args()
    main(args)
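src/args_parser.py is not shown on this page, so the following is only a hypothetical sketch of what `parse_args` is assumed to provide, reconstructed from the usage text in caffe/caffe2onnx/README.md; the actual implementation may differ:

```python
# Hypothetical sketch of src/args_parser.parse_args, inferred from the README usage text.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description="convert caffe model to onnx")
    parser.add_argument("proto_file",
                        help="the path for the prototxt file, must end with .prototxt")
    parser.add_argument("caffe_model_file",
                        help="the path for the caffe model file, must end with .caffemodel")
    parser.add_argument("-o", dest="onnx_file", default=None,
                        help="the path for the generated onnx file")
    return parser.parse_args()
```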
caffe/caffe2onnx/doc/onnx-version.md
0 → 100644
# onnx version

| Operator              | 1.2.2                                 | 1.6.0                                           | compatible      |
|-----------------------|---------------------------------------|-------------------------------------------------|-----------------|
| AveragePool           | -                                     | attributes(ceil_mode)                           | yes             |
| BatchNormalization    | spatial                               | spatial (removed, not used)                     | yes             |
| Clip                  | attributes(min, max)                  | inputs(min, max)                                | yes (supported) |
| Concat                | -                                     | -                                               | yes             |
| Conv                  | -                                     | -                                               | yes             |
| ConvTranspose         | -                                     | -                                               | yes             |
| DepthToSpace          | attributes(blocksize)                 | attributes(blocksize, mode)                     | yes (supported) |
| Div                   | -                                     | -                                               | yes             |
| Exp                   | -                                     | -                                               | yes             |
| Expand                | not supported                         | supported                                       | yes             |
| Gemm                  | -                                     | -                                               | yes             |
| GlobalAveragePool     | -                                     | -                                               | yes             |
| GlobalMaxPool         | -                                     | -                                               | yes             |
| InstanceNormalization | -                                     | -                                               | yes             |
| LeakyRelu             | -                                     | -                                               | yes             |
| MaxPool               | -                                     | attributes(ceil_mode, dilations, storage_order) | ?               |
| Mul                   | -                                     | -                                               | yes             |
| PRelu                 | -                                     | -                                               | yes             |
| Pad                   | attributes(pads, value)               | inputs(pads, constant_value)                    | yes (supported) |
| ReduceL2              | -                                     | -                                               | yes             |
| ReduceMean            | -                                     | -                                               | yes             |
| ReduceSum             | -                                     | -                                               | yes             |
| Relu                  | -                                     | -                                               | yes             |
| Reshape               | -                                     | -                                               | yes             |
| Slice                 | attributes(starts, ends, axes, steps) | inputs(starts, ends, axes, steps)               | yes (supported) |
| Softmax               | -                                     | -                                               | yes             |
| Softplus              | -                                     | -                                               | yes             |
| Split                 | -                                     | -                                               | yes             |
| Sub                   | -                                     | -                                               | yes             |
| Tanh                  | -                                     | -                                               | yes             |
| Tile                  | -                                     | -                                               | yes             |
| Transpose             | -                                     | -                                               | yes             |
| Upsample              | Upsample                              | deprecated, replaced by Resize                  | yes             |
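The Clip row illustrates the attribute-to-input migration that drives most of this table: in onnx 1.2.2 (older opsets) min/max are attributes, while in onnx 1.6.0 (opset 11) they become optional inputs. A minimal illustration with `onnx.helper` (tensor names are placeholders):

```python
from onnx import helper

# Older opset style: min/max carried as attributes.
clip_old = helper.make_node("Clip", inputs=["x"], outputs=["y"], min=0.0, max=6.0)

# onnx 1.6.0 / opset 11 style: min/max supplied as extra inputs
# ("clip_min" / "clip_max" would be initializers or Constant outputs).
clip_new = helper.make_node("Clip", inputs=["x", "clip_min", "clip_max"], outputs=["y"])
```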
caffe/caffe2onnx/proto/__init__.py
0 → 100644
caffe/caffe2onnx/proto/caffe_upsample.proto
0 → 100644
caffe/caffe2onnx/proto/caffe_upsample_pb2.py
0 → 100644
caffe/caffe2onnx/src/OPs/Add.py
0 → 100644
# Tencent is pleased to support the open source community by making TNN available.
#
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import src.c2oObject as Node


def get_add_output_shape(input_shape):
    output_shape = input_shape[0]
    return [output_shape]


def create_add_node(layer, node_name, input_name, output_name, input_shape):
    output_shape = get_add_output_shape(input_shape)
    node = Node.c2oNode(layer, node_name, 'Add', input_name, output_name, input_shape, output_shape)
    return node
caffe/caffe2onnx/src/OPs/Axpy.py
0 → 100644
# Tencent is pleased to support the open source community by making TNN available.
#
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import src.c2oObject as Node
from typing import *
import copy


def need_add_reshape(input_shape: List[List]) -> bool:
    return len(input_shape[0]) != len(input_shape[1])


def get_param_shape(input_shape: List[List]) -> List:
    input = input_shape[0]
    scale = copy.deepcopy(input_shape[1])
    if len(input) > len(scale):
        for i in range(len(input) - len(scale)):
            scale.append(1)
    return scale


def broadcast_scale(input_shape: List[List]) -> List[List]:
    input = input_shape[0]
    scale = input_shape[1]
    if len(input) > len(scale):
        for i in range(len(input) - len(scale)):
            scale.append(1)
        broadcast_shape = [input, scale]
    elif len(input) < len(scale):
        print("the scale should be less than input")
        exit(-1)
    else:
        broadcast_shape = [input, scale]
    return broadcast_shape


def get_mul_output_shape(input_shape: List[List]) -> List[List]:
    output_shape = input_shape[1]
    return [output_shape]


def create_axpy_mul_node(layer, node_name, input_name, output_name, input_shape):
    new_node_name = node_name + "_middle"
    output_shape = get_mul_output_shape(input_shape)
    new_input_name = [input_name[0], input_name[1]]
    new_output_name = [output_name[0] + "_mul"]
    new_input_shape = [input_shape[0], input_shape[1]]
    node = Node.c2oNode(layer, new_node_name, 'Mul', new_input_name, new_output_name, new_input_shape, output_shape)
    return node


def get_add_output_shape(input_shape):
    output_shape = input_shape[1]
    return [output_shape]


def create_axpy_add_node(layer, node_name, input_name, output_name, input_shape):
    output_shape = get_add_output_shape(input_shape)
    new_input_name = [node_name + "_mul", input_name[2]]
    new_input_shape = [input_shape[1], input_shape[2]]
    node = Node.c2oNode(layer, node_name, "Add", new_input_name, output_name, input_shape, output_shape)
    return node
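Caffe's Axpy layer computes y = scale * x + y, which has no single ONNX counterpart, so the converter emits a Mul followed by an Add wired through an intermediate "<node_name>_mul" tensor. The shape helpers above can be exercised on their own; for example, a per-channel scale gets padded with trailing 1s so it broadcasts against a 4-D input (shapes below are illustrative):

```python
# A 2-D scale against a 4-D input needs a reshape before the Mul.
print(need_add_reshape([[1, 64, 56, 56], [1, 64]]))   # -> True

# get_param_shape pads the scale shape so it broadcasts against the 4-D input.
print(get_param_shape([[1, 64, 56, 56], [1, 64]]))    # -> [1, 64, 1, 1]
```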
caffe/caffe2onnx/src/OPs/BatchNorm.py
0 → 100644
import src.c2oObject as Node


##-----------------------------BatchNormalization layer = BatchNorm + Scale-------------------------------------##
# Get the hyperparameters
def getBNAttri(layer):
    # Hyperparameter dictionary
    eps = layer.batch_norm_param.eps
    momentum = layer.batch_norm_param.moving_average_fraction
    dict = {"epsilon": eps,
            # moving-average coefficient
            "momentum": momentum}
    return dict


# Compute the output shape
def getBNOutShape(input_shape):
    output_shape = input_shape
    return output_shape


# Build the node
def createBN(layer, nodename, inname, outname, input_shape):
    dict = getBNAttri(layer)
    # Compute output_shape; the output shape equals the input shape
    output_shape = getBNOutShape(input_shape)
    # Build the node
    node = Node.c2oNode(layer, nodename, "BatchNormalization", inname, outname, input_shape, output_shape, dict)
    return node
caffe/caffe2onnx/src/OPs/Clip.py
0 → 100644
# Tencent is pleased to support the open source community by making TNN available.
#
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import src.c2oObject as Node


def get_attribute(layer):
    # Collect min/max for ReLU6-style clipping (unused below; see create_clip_node).
    attributes = {}
    max_attribute = 0
    min_attribute = 0
    if layer.type == 'ReLU6':
        max_attribute = 6.0
        min_attribute = 0
    attributes = {'max': max_attribute,
                  'min': min_attribute}
    return attributes


def get_clip_output_shape(input_shape):
    output_shape = input_shape
    return output_shape


def create_clip_node(layer, node_name, input_name, output_name, input_shape):
    # onnx 1.6.0 does not use min/max attributes (they are passed as inputs instead)
    # attributes = get_attribute(layer)
    output_shape = get_clip_output_shape(input_shape)
    node = Node.c2oNode(layer, node_name, 'Clip', input_name, output_name, input_shape, output_shape)
    return node
caffe/caffe2onnx/src/OPs/Concat.py
0 → 100644
import src.c2oObject as Node
from typing import List
import copy


def get_concat_attributes(layer):
    ## axis
    axis = layer.concat_param.axis
    attributes = {"axis": axis}
    return attributes


# Compute the output shape
def get_concat_outshape(layer, input_shape: List) -> List:
    bottom = input_shape[0]
    axis = layer.concat_param.axis
    output_shape = copy.deepcopy(bottom)
    assert (axis < len(bottom))
    for i in range(1, len(input_shape)):
        output_shape[axis] = output_shape[axis] + input_shape[i][axis]
    return [output_shape]
    #
    # if len(bottom) == 2:
    #     n, c = bottom[0], 0
    #     for i in range(len(input_shape)):
    #         c = c + input_shape[i][1]
    #     output_shape = [[n, c]]
    #     return output_shape
    #
    # elif len(bottom) == 3:
    #     n, c = bottom[0], 0
    #     for i in range(len(input_shape)):
    #         c = c + input_shape[i][1]
    #     output_shape = [[n, c]]
    #     return output_shape
    #
    # elif len(bottom) == 4:
    #     n, c, w, h = input_shape[0][0], 0, input_shape[0][2], input_shape[0][3]
    #     for i in range(len(input_shape)):
    #         c = c + input_shape[i][1]
    #     output_shape = [[n, c, w, h]]
    #     return output_shape


# Build the node
def createConcat(layer, nodename, inname, outname, input_shape):
    attributes = get_concat_attributes(layer)
    output_shape = get_concat_outshape(layer, input_shape)
    node = Node.c2oNode(layer, nodename, "Concat", inname, outname, input_shape, output_shape, attributes)
    return node
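`get_concat_outshape` only reads `concat_param.axis` from the layer, so it can be sanity-checked with a simple stand-in object (the `SimpleNamespace` here is a hypothetical substitute for the parsed Caffe layer message):

```python
from types import SimpleNamespace

# Hypothetical stand-in for the parsed Caffe layer; only concat_param.axis is read.
layer = SimpleNamespace(concat_param=SimpleNamespace(axis=1))
print(get_concat_outshape(layer, [[1, 64, 56, 56], [1, 128, 56, 56]]))
# -> [[1, 192, 56, 56]] (channels concatenated along axis 1)
```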
caffe/caffe2onnx/src/OPs/Conv.py
0 → 100644
import numpy as np
import src.c2oObject as Node
import math


# Get the hyperparameters
def getConvAttri(layer, input_shape):
    # dilation factors
    dilations = [1, 1]
    if layer.convolution_param.dilation != []:
        dilation = layer.convolution_param.dilation[0]
        dilations = [dilation, dilation]
    ## pads
    pads = [0, 0, 0, 0]  # default 0
    if layer.convolution_param.pad != []:
        # if pad is given, use it
        pads = np.array([layer.convolution_param.pad] * 4).reshape(1, -1)[0].tolist()
    elif layer.convolution_param.pad_h != 0 or layer.convolution_param.pad_w != 0:
        # otherwise fall back to pad_h / pad_w when present
        pads = [layer.convolution_param.pad_h, layer.convolution_param.pad_w,
                layer.convolution_param.pad_h, layer.convolution_param.pad_w]
    ## strides
    strides = [1, 1]  # default 1
    if layer.convolution_param.stride != []:
        strides = np.array([layer.convolution_param.stride] * 2).reshape(1, -1)[0].tolist()
    elif layer.convolution_param.stride_h != 0 and layer.convolution_param.stride_w != 0:
        strides = [layer.convolution_param.stride_h, layer.convolution_param.stride_w]
    ## kernel_shape
    kernel_shape = np.array([layer.convolution_param.kernel_size] * 2).reshape(1, -1)[0].tolist()
    if layer.convolution_param.kernel_size == []:
        kernel_shape = [layer.convolution_param.kernel_h, layer.convolution_param.kernel_w]
    ## group
    group = 1
    if layer.type == "ConvolutionDepthwise":
        group = input_shape[0][1]
    else:
        group = layer.convolution_param.group
    # Hyperparameter dictionary
    dict = {  # "auto_pad": "NOTSET",
        "dilations": dilations,
        "group": group,
        "kernel_shape": kernel_shape,
        "pads": pads,
        "strides": strides
    }
    return dict


# Compute the output shape
def getConvOutShape(input_shape, layer, dict):
    dilations = dict["dilations"]
    kernel_shape = dict["kernel_shape"]
    pads = dict["pads"]
    strides = dict["strides"]
    ## number of kernels
    kernel_num = layer.convolution_param.num_output
    # reference the caffe source code
    kernel_extent_h = dilations[0] * (kernel_shape[0] - 1) + 1
    output_shape_h = math.floor((input_shape[0][2] + 2 * pads[0] - kernel_extent_h) / strides[0]) + 1
    kernel_extent_w = dilations[1] * (kernel_shape[1] - 1) + 1
    output_shape_w = math.floor((input_shape[0][3] + 2 * pads[1] - kernel_extent_w) / strides[1]) + 1
    output_shape = [[input_shape[0][0], kernel_num, output_shape_h, output_shape_w]]
    return output_shape


# Build the node
def createConv(layer, node_name, input_name, output_name, input_shape):
    attributes = getConvAttri(layer, input_shape)
    output_shape = getConvOutShape(input_shape, layer, attributes)
    # Build the node
    node = Node.c2oNode(layer, node_name, "Conv", input_name, output_name, input_shape, output_shape, attributes)
    return node
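The output-shape computation mirrors Caffe's convolution arithmetic: out = floor((in + 2*pad - (dilation*(kernel-1)+1)) / stride) + 1. A worked example (numbers chosen to match a typical 7x7/stride-2 first convolution on a 224x224 input, purely for illustration):

```python
import math

# Worked example of the formula used in getConvOutShape.
in_h, pad, kernel, stride, dilation = 224, 3, 7, 2, 1
kernel_extent = dilation * (kernel - 1) + 1                # 7
out_h = math.floor((in_h + 2 * pad - kernel_extent) / stride) + 1
print(out_h)                                               # 112
```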
caffe/caffe2onnx/src/OPs/ConvTranspose.py
0 → 100644
import numpy as np
import src.c2oObject as Node


##---------------------------------------------------ConvTranspose layer-------------------------------------------------------##
# Get the hyperparameters
def getConvTransposeAttri(layer):
    ## dilation factors
    dilations = [1, 1]
    if layer.convolution_param.dilation != []:
        dilation = layer.convolution_param.dilation[0]
        dilations = [dilation, dilation]
    ## pads
    pads = [0, 0, 0, 0]  # default 0
    if layer.convolution_param.pad != []:
        # if pad is given, use it
        pads = np.array([layer.convolution_param.pad] * 4).reshape(1, -1)[0].tolist()
    elif layer.convolution_param.pad_h != 0 or layer.convolution_param.pad_w != 0:
        # otherwise fall back to pad_h / pad_w when present
        pads = [layer.convolution_param.pad_h, layer.convolution_param.pad_w,
                layer.convolution_param.pad_h, layer.convolution_param.pad_w]
    ## strides
    strides = [1, 1]  # default 1
    if layer.convolution_param.stride != []:
        strides = np.array([layer.convolution_param.stride] * 2).reshape(1, -1)[0].tolist()
    elif layer.convolution_param.stride_h != 0 and layer.convolution_param.stride_w != 0:
        strides = [layer.convolution_param.stride_h, layer.convolution_param.stride_w]
    ## kernel_shape
    kernel_shape = np.array([layer.convolution_param.kernel_size] * 2).reshape(1, -1)[0].tolist()
    if layer.convolution_param.kernel_size == []:
        kernel_shape = [layer.convolution_param.kernel_h, layer.convolution_param.kernel_w]
    ## group
    group = layer.convolution_param.group
    # Hyperparameter dictionary
    dict = {  # "auto_pad": "NOTSET",
        "dilations": dilations,
        "group": group,
        "kernel_shape": kernel_shape,
        "pads": pads,
        "strides": strides
    }
    return dict


# Compute the output shape
def getConvTransposeOutShape(input_shape, layer, dict):
    dilations = dict["dilations"]
    kernel_shape = dict["kernel_shape"]
    pads = dict["pads"]
    strides = dict["strides"]
    ## number of kernels
    kernel_num = layer.convolution_param.num_output

    def get_output_shape(i, k, p, s):
        return (i - 1) * s + k - 2 * p

    h = get_output_shape(input_shape[0][2], kernel_shape[0], pads[0], strides[0])
    w = get_output_shape(input_shape[0][3], kernel_shape[1], pads[1], strides[1])
    output_shape = [[input_shape[0][0], kernel_num, h, w]]
    return output_shape


# Build the node
def createConvTranspose(layer, nodename, inname, outname, input_shape):
    dict = getConvTransposeAttri(layer)
    output_shape = getConvTransposeOutShape(input_shape, layer, dict)
    # Build the node
    node = Node.c2oNode(layer, nodename, "ConvTranspose", inname, outname, input_shape, output_shape, dict)
    return node
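The inner `get_output_shape` helper implements the usual deconvolution size formula, out = (in - 1) * stride + kernel - 2 * pad (note it does not account for dilation or output padding). For example, a 2x2 kernel with stride 2 and no padding doubles the spatial size:

```python
# Worked example of the deconvolution size formula in getConvTransposeOutShape.
i, k, p, s = 14, 2, 0, 2
print((i - 1) * s + k - 2 * p)   # 28: a 14x14 feature map is upsampled to 28x28
```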
caffe/caffe2onnx/src/OPs/Crop.py
0 → 100644
# Tencent is pleased to support the open source community by making TNN available.
#
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import src.c2oObject as Node
import numpy as np


def get_crop_param(layer, input_shape):
    axis: int = layer.crop_param.axis
    crop_offset = layer.crop_param.offset
    if not crop_offset:
        offset_0 = 0
    else:
        offset_0 = crop_offset[0]

    offset = []
    starts = []
    axes = []
    ends = []
    for i in range(len(input_shape[0])):
        if i < axis:
            start = 0
            end = input_shape[1][i]
        else:
            if (i - axis) >= len(crop_offset):
                offset.append(offset_0)
            else:
                offset.append(crop_offset[i - axis])
            start = offset[i - axis]
            end = start + input_shape[1][i]
        if input_shape[0][i] != input_shape[1][i]:
            axes.append(i)
            starts.append(start)
            ends.append(end)
    return starts, ends, axes


def get_crop_output_shape(layer, input_shape):
    return [input_shape[1]]


def create_crop_node(layer, node_name, input_name, output_name, input_shape):
    output_shape = get_crop_output_shape(layer, input_shape)
    node = Node.c2oNode(layer, node_name, "Slice", input_name, output_name, input_shape, output_shape)
    return node
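Caffe's Crop layer is mapped onto an ONNX Slice, with starts/ends/axes derived from the offsets and the shape of the second (reference) input. `get_crop_param` only touches `crop_param`, so it can be checked with a stand-in layer object (a hypothetical substitute for the parsed prototxt message):

```python
from types import SimpleNamespace

# Hypothetical stand-in: crop at axis 2 with offset 2, reference blob 220x220.
layer = SimpleNamespace(crop_param=SimpleNamespace(axis=2, offset=[2]))
input_shape = [[1, 3, 224, 224], [1, 3, 220, 220]]
print(get_crop_param(layer, input_shape))
# -> ([2, 2], [222, 222], [2, 3]): slice rows/cols 2..222 on the two spatial axes
```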
caffe/caffe2onnx/src/OPs/DetectionOutput.py
0 → 100644
# Tencent is pleased to support the open source community by making TNN available.
#
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import onnx
from typing import *
from onnx import helper
from typing import *
import ctypes

import src.c2oObject as Node


def create_attribuates(layer) -> Dict:
    detection_output_param = layer.detection_output_param
    num_classes = detection_output_param.num_classes
    share_location = 1 if detection_output_param.share_location else 0
    background_label_id = detection_output_param.background_label_id
    # NonMaximumSuppressionParameter
    nms_threshold = detection_output_param.nms_param.nms_threshold
    top_k = detection_output_param.nms_param.top_k
    eta = detection_output_param.nms_param.eta
    code_type = detection_output_param.code_type
    variance_encoded_in_target = 1 if detection_output_param.variance_encoded_in_target else 0
    keep_top_k = detection_output_param.keep_top_k
    confidence_threshold = detection_output_param.confidence_threshold
    visualize = 1 if detection_output_param.visualize else 0
    visualize_threshold = detection_output_param.visualize_threshold
    save_file = detection_output_param.save_file
    # TODO: SaveOutputParameter
    # save_output_param = detection_output_param.save_output_param
    # output_directory: str = save_output_param.output_directory
    # output_name_prefix: str = save_output_param.output_name_prefix
    # output_format: str = save_output_param.output_format
    # label_map_file: str = save_output_param.label_map_file
    # name_size_file: str = save_output_param.name_size_file
    # num_test_image: int = save_output_param.num_test_image
    attributes = {'num_classes': num_classes,
                  'share_location': share_location,
                  'background_label_id': background_label_id,
                  'nms_threshold': nms_threshold,
                  'top_k': top_k,
                  'eta': eta,
                  'code_type': code_type,
                  'variance_encoded_in_target': variance_encoded_in_target,
                  'keep_top_k': keep_top_k,
                  'confidence_threshold': confidence_threshold,
                  'visualize': visualize,
                  'visualize_threshold': visualize_threshold,
                  'save_file': save_file}
    return attributes


def create_detection_output(layer,
                            node_name: str,
                            inputs_name: List[str],
                            outputs_name: List[str],
                            inputs_shape: List,
                            ) -> onnx.NodeProto:
    attributes = create_attribuates(layer)
    outputs_shape = [[1, 1, 1, 7]]
    node = Node.c2oNode(layer, node_name, "DetectionOutput", inputs_name, outputs_name,
                        inputs_shape, outputs_shape, attributes)
    return node
caffe/caffe2onnx/src/OPs/Dropout.py
0 → 100644
import src.c2oObject as Node


##----------------------------------------------------Dropout layer-------------------------------------------------------##
# Get the hyperparameters
def getDropoutAttri(layer):
    ## dropout ratio
    ratio = layer.dropout_param.dropout_ratio
    # Dropout is not needed at inference time; with ratio set to 0 the node can later be optimized away by onnx tools
    ratio = 0.0
    # Hyperparameter dictionary
    dict = {"ratio": ratio}
    return dict


def getDropoutOutShape(input_shape):
    # Compute output_shape
    output_shape = input_shape  # same as the input shape
    return output_shape


# Build the node
def createDropout(layer, nodename, inname, outname, input_shape):
    dict = getDropoutAttri(layer)
    output_shape = getDropoutOutShape(input_shape)
    # Build the node
    node = Node.c2oNode(layer, nodename, "Dropout", inname, outname, input_shape, output_shape, dict=dict)
    return node
caffe/caffe2onnx/src/OPs/Eltwise.py
0 → 100644
import src.c2oObject as Node


##-------------------------------------------------Eltwise layer----------------------------------------------------------##
def createEltwise(layer, nodename, inname, outname, input_shape):
    # Dispatch on the eltwise operation type
    if layer.eltwise_param.operation == 0:
        node = __createMul(layer, nodename, inname, outname, input_shape)  # element-wise product
    elif layer.eltwise_param.operation == 1:
        node = __createAdd(layer, nodename, inname, outname, input_shape)  # element-wise sum
    elif layer.eltwise_param.operation == 2:
        node = __createMax(layer, nodename, inname, outname, input_shape)  # element-wise max
    return node


##----------------------------------------------Mul layer, corresponds to PROD-----------------------------------------------##
def __createMul(layer, nodename, inname, outname, input_shape):
    output_shape = input_shape[0]
    node = Node.c2oNode(layer, nodename, "Mul", inname, outname, input_shape, output_shape)
    return node


##---------------------Add layer: may add two intermediate outputs, or an output plus a bias------------------------##
def __createAdd(layer, nodename, inname, outname, input_shape):
    output_shape = [input_shape[0]]
    node = Node.c2oNode(layer, nodename, "Add", inname, outname, input_shape, output_shape)
    return node


##----------------------------------------------Max layer-------------------------------------------------------------##
def __createMax(layer, nodename, inname, outname, input_shape):
    output_shape = input_shape
    node = Node.c2oNode(layer, nodename, "Max", inname, outname, input_shape, output_shape)
    return node
caffe/caffe2onnx/src/OPs/Flatten.py
0 → 100644
import src.c2oObject as Node
from typing import List, Dict
import onnx


def get_attributes(layer) -> Dict:
    axis = layer.flatten_param.axis
    end_axis = layer.flatten_param.end_axis
    if end_axis != -1:
        print("not support end_axis param!")
        exit(-1)
    attributes = {"axis": axis}
    return attributes


def get_flatten_output_shape(input_shape: List, attributes: Dict) -> List:
    shape = input_shape[0]
    input_prod = 1
    axis = attributes.get("axis")
    for i in range(axis, len(shape)):
        input_prod = input_prod * shape[i]
    output_shape = [shape[0:axis] + [input_prod]]
    return output_shape


def create_flatten_node(layer,
                        node_name: str,
                        input_names: List,
                        output_name: List,
                        input_shape: List) -> onnx.NodeProto:
    attributes = get_attributes(layer)
    output_shape = get_flatten_output_shape(input_shape, attributes)
    node = Node.c2oNode(layer, node_name, "Flatten", input_names, output_name,
                        input_shape, output_shape, attributes)
    return node
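`get_flatten_output_shape` collapses every dimension from `axis` onward into one, matching Caffe's Flatten with end_axis == -1. A quick check with a typical classifier input (values are illustrative):

```python
# Flattening a [1, 256, 6, 6] blob at axis=1 collapses C*H*W into 9216.
print(get_flatten_output_shape([[1, 256, 6, 6]], {"axis": 1}))
# -> [[1, 9216]]
```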