Commit af40aa1b
authored May 07, 2017 by Davis King

Cleaned up how the output is output.

parent 7494f51d
Showing 1 changed file with 111 additions and 100 deletions

tools/convert_dlib_nets_to_caffe/main.cpp (+111, -100)
@@ -108,42 +108,48 @@ void convert_dlib_xml_to_cafffe_python_code(
     const string& xml_filename
 )
 {
+    const string out_filename = left_substr(xml_filename, ".") + "_dlib_to_caffe_model.py";
+    cout << "Writing model to " << out_filename << endl;
+    ofstream fout(out_filename);
+    fout.precision(9);
     const auto layers = parse_dlib_xml(xml_filename);

-    cout << "import caffe " << endl;
-    cout << "from caffe import layers as L, params as P" << endl;
-    cout << "import numpy as np" << endl;
+    fout << "import caffe " << endl;
+    fout << "from caffe import layers as L, params as P" << endl;
+    fout << "import numpy as np" << endl;

     // dlib nets don't commit to a batch size, so just use 1 as the default
-    cout << "batch_size = 1;" << endl;
+    fout << "\n# Input tensor dimensions" << endl;
+    fout << "batch_size = 1;" << endl;
     if (layers.back().detail_name == "input_rgb_image")
     {
-        cout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
-        cout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
-        cout << "input_k = 3;" << endl;
+        fout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
+        fout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
+        fout << "input_k = 3;" << endl;
     }
     else if (layers.back().detail_name == "input_rgb_image_sized")
     {
-        cout << "input_nr = " << layers.back().attribute("nr") << ";" << endl;
-        cout << "input_nc = " << layers.back().attribute("nc") << ";" << endl;
-        cout << "input_k = 3;" << endl;
+        fout << "input_nr = " << layers.back().attribute("nr") << ";" << endl;
+        fout << "input_nc = " << layers.back().attribute("nc") << ";" << endl;
+        fout << "input_k = 3;" << endl;
     }
     else if (layers.back().detail_name == "input")
     {
-        cout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
-        cout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
-        cout << "input_k = 1;" << endl;
+        fout << "input_nr = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
+        fout << "input_nc = 28; #WARNING, the source dlib network didn't commit to a specific input size, so we put 28 here as a default." << endl;
+        fout << "input_k = 1;" << endl;
     }
     else
     {
         throw dlib::error("No known transformation from dlib's " + layers.back().detail_name + " layer to caffe.");
     }
+    fout << endl;

-    cout << "def make_netspec():" << endl;
-    cout << " # For reference, the only \"documentation\" about caffe layer parameters seems to be this page:\n";
-    cout << " # https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto\n" << endl;
-    cout << " n = caffe.NetSpec(); " << endl;
-    cout << " n.data,n.label = L.MemoryData(batch_size=batch_size, channels=input_k, height=input_nr, width=input_nc, ntop=2)" << endl;
+    fout << "def make_netspec():" << endl;
+    fout << " # For reference, the only \"documentation\" about caffe layer parameters seems to be this page:\n";
+    fout << " # https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto\n" << endl;
+    fout << " n = caffe.NetSpec(); " << endl;
+    fout << " n.data,n.label = L.MemoryData(batch_size=batch_size, channels=input_k, height=input_nr, width=input_nc, ntop=2)" << endl;

     // iterate the layers starting with the input layer
     for (auto i = layers.rbegin(); i != layers.rend(); ++i)
     {
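For orientation, the preamble that this hunk now writes into the generated *_dlib_to_caffe_model.py file assembles to roughly the sketch below (formatting approximate). The concrete input_nr/input_nc values here are hypothetical: in the input_rgb_image_sized case they come from the network's XML, and in the other input cases the 28-pixel defaults shown in the diff are used.

    # Rough sketch of the generated Python preamble (hypothetical 150x150 RGB input).
    import caffe
    from caffe import layers as L, params as P
    import numpy as np

    # Input tensor dimensions
    batch_size = 1;
    input_nr = 150;
    input_nc = 150;
    input_k = 3;

    def make_netspec():
        # For reference, the only "documentation" about caffe layer parameters seems to be this page:
        # https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto
        n = caffe.NetSpec();
        n.data,n.label = L.MemoryData(batch_size=batch_size, channels=input_k, height=input_nr, width=input_nc, ntop=2)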
@@ -154,33 +160,33 @@ void convert_dlib_xml_to_cafffe_python_code(
         if (i->detail_name == "con")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.Convolution(n." << find_input_layer_caffe_name(i);
-            cout << ", num_output=" << i->attribute("num_filters");
-            cout << ", kernel_w=" << i->attribute("nc");
-            cout << ", kernel_h=" << i->attribute("nr");
-            cout << ", stride_w=" << i->attribute("stride_x");
-            cout << ", stride_h=" << i->attribute("stride_y");
-            cout << ", pad_w=" << i->attribute("padding_x");
-            cout << ", pad_h=" << i->attribute("padding_y");
-            cout << ");\n";
+            fout << " n." << i->caffe_layer_name() << " = L.Convolution(n." << find_input_layer_caffe_name(i);
+            fout << ", num_output=" << i->attribute("num_filters");
+            fout << ", kernel_w=" << i->attribute("nc");
+            fout << ", kernel_h=" << i->attribute("nr");
+            fout << ", stride_w=" << i->attribute("stride_x");
+            fout << ", stride_h=" << i->attribute("stride_y");
+            fout << ", pad_w=" << i->attribute("padding_x");
+            fout << ", pad_h=" << i->attribute("padding_y");
+            fout << ");\n";
         }
         else if (i->detail_name == "relu")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.ReLU(n." << find_input_layer_caffe_name(i);
-            cout << ");\n";
+            fout << " n." << i->caffe_layer_name() << " = L.ReLU(n." << find_input_layer_caffe_name(i);
+            fout << ");\n";
         }
         else if (i->detail_name == "max_pool")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.Pooling(n." << find_input_layer_caffe_name(i);
-            cout << ", pool=P.Pooling.MAX";
+            fout << " n." << i->caffe_layer_name() << " = L.Pooling(n." << find_input_layer_caffe_name(i);
+            fout << ", pool=P.Pooling.MAX";
             if (i->attribute("nc") == 0)
             {
-                cout << ", global_pooling=True";
+                fout << ", global_pooling=True";
             }
             else
             {
-                cout << ", kernel_w=" << i->attribute("nc");
-                cout << ", kernel_h=" << i->attribute("nr");
+                fout << ", kernel_w=" << i->attribute("nc");
+                fout << ", kernel_h=" << i->attribute("nr");
             }
             if (i->attribute("padding_x") != 0 || i->attribute("padding_y") != 0)
             {
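Continuing the sketch above: inside make_netspec(), the con / relu / max_pool branches in this hunk emit one line per dlib layer, mapping dlib's nc/nr attributes to Caffe's kernel_w/kernel_h and stride_x/stride_y to stride_w/stride_h. With hypothetical layer names and attribute values, the generated lines look roughly like this (the trailing return is emitted after the layer loop, in a later hunk):

        # continuation of make_netspec() from the sketch above (hypothetical names and values)
        n.conv1 = L.Convolution(n.data, num_output=16, kernel_w=5, kernel_h=5, stride_w=1, stride_h=1, pad_w=0, pad_h=0);
        n.relu1 = L.ReLU(n.conv1);
        n.pool1 = L.Pooling(n.relu1, pool=P.Pooling.MAX, kernel_w=2, kernel_h=2, stride_w=2, stride_h=2, pad_w=0, pad_h=0);
        return n.to_proto();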
@@ -188,24 +194,24 @@ void convert_dlib_xml_to_cafffe_python_code(
                     "network with such pooling layers.");
             }
-            cout << ", stride_w=" << i->attribute("stride_x");
-            cout << ", stride_h=" << i->attribute("stride_y");
-            cout << ", pad_w=" << i->attribute("padding_x");
-            cout << ", pad_h=" << i->attribute("padding_y");
-            cout << ");\n";
+            fout << ", stride_w=" << i->attribute("stride_x");
+            fout << ", stride_h=" << i->attribute("stride_y");
+            fout << ", pad_w=" << i->attribute("padding_x");
+            fout << ", pad_h=" << i->attribute("padding_y");
+            fout << ");\n";
         }
         else if (i->detail_name == "avg_pool")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.Pooling(n." << find_input_layer_caffe_name(i);
-            cout << ", pool=P.Pooling.AVE";
+            fout << " n." << i->caffe_layer_name() << " = L.Pooling(n." << find_input_layer_caffe_name(i);
+            fout << ", pool=P.Pooling.AVE";
             if (i->attribute("nc") == 0)
             {
-                cout << ", global_pooling=True";
+                fout << ", global_pooling=True";
             }
             else
             {
-                cout << ", kernel_w=" << i->attribute("nc");
-                cout << ", kernel_h=" << i->attribute("nr");
+                fout << ", kernel_w=" << i->attribute("nc");
+                fout << ", kernel_h=" << i->attribute("nr");
             }
             if (i->attribute("padding_x") != 0 || i->attribute("padding_y") != 0)
             {
@@ -213,25 +219,25 @@ void convert_dlib_xml_to_cafffe_python_code(
                     "network with such pooling layers.");
             }
-            cout << ", stride_w=" << i->attribute("stride_x");
-            cout << ", stride_h=" << i->attribute("stride_y");
-            cout << ", pad_w=" << i->attribute("padding_x");
-            cout << ", pad_h=" << i->attribute("padding_y");
-            cout << ");\n";
+            fout << ", stride_w=" << i->attribute("stride_x");
+            fout << ", stride_h=" << i->attribute("stride_y");
+            fout << ", pad_w=" << i->attribute("padding_x");
+            fout << ", pad_h=" << i->attribute("padding_y");
+            fout << ");\n";
         }
         else if (i->detail_name == "fc")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.InnerProduct(n." << find_input_layer_caffe_name(i);
-            cout << ", num_output=" << i->attribute("num_outputs");
-            cout << ", bias_term=True";
-            cout << ");\n";
+            fout << " n." << i->caffe_layer_name() << " = L.InnerProduct(n." << find_input_layer_caffe_name(i);
+            fout << ", num_output=" << i->attribute("num_outputs");
+            fout << ", bias_term=True";
+            fout << ");\n";
         }
         else if (i->detail_name == "fc_no_bias")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.InnerProduct(n." << find_input_layer_caffe_name(i);
-            cout << ", num_output=" << i->attribute("num_outputs");
-            cout << ", bias_term=False";
-            cout << ");\n";
+            fout << " n." << i->caffe_layer_name() << " = L.InnerProduct(n." << find_input_layer_caffe_name(i);
+            fout << ", num_output=" << i->attribute("num_outputs");
+            fout << ", bias_term=False";
+            fout << ");\n";
         }
         else if (i->detail_name == "bn_con" || i->detail_name == "bn_fc")
         {
@@ -240,50 +246,50 @@ void convert_dlib_xml_to_cafffe_python_code(
         }
         else if (i->detail_name == "affine_con")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.Scale(n." << find_input_layer_caffe_name(i);
-            cout << ", axis=1";
-            cout << ", bias_term=True";
-            cout << ");\n";
+            fout << " n." << i->caffe_layer_name() << " = L.Scale(n." << find_input_layer_caffe_name(i);
+            fout << ", axis=1";
+            fout << ", bias_term=True";
+            fout << ");\n";
         }
         else if (i->detail_name == "affine_fc")
         {
-            cout << " n." << i->caffe_layer_name() << " = L.Scale(n." << find_input_layer_caffe_name(i);
-            cout << ", axis=3";
-            cout << ", bias_term=True";
-            cout << ");\n";
+            fout << " n." << i->caffe_layer_name() << " = L.Scale(n." << find_input_layer_caffe_name(i);
+            fout << ", axis=3";
+            fout << ", bias_term=True";
+            fout << ");\n";
         }
         else if (i->detail_name == "add_prev")
        {
-            cout << " n." << i->caffe_layer_name() << " = L.Eltwise(n." << find_input_layer_caffe_name(i);
-            cout << ", n." << find_layer_caffe_name(i, i->attribute("tag"));
-            cout << ", operation=P.Eltwise.SUM";
-            cout << ");\n";
+            fout << " n." << i->caffe_layer_name() << " = L.Eltwise(n." << find_input_layer_caffe_name(i);
+            fout << ", n." << find_layer_caffe_name(i, i->attribute("tag"));
+            fout << ", operation=P.Eltwise.SUM";
+            fout << ");\n";
         }
         else
         {
             throw dlib::error("No known transformation from dlib's " + i->detail_name + " layer to caffe.");
         }
     }
-    cout << " return n.to_proto();\n\n" << endl;
+    fout << " return n.to_proto();\n\n" << endl;

     // -------------------------
     // -------------------------

-    cout << "def save_as_caffe_model(def_file, weights_file):\n";
-    cout << " with open(def_file, 'w') as f: f.write(str(make_netspec()));\n";
-    cout << " net = caffe.Net(def_file, caffe.TEST);\n";
-    cout << " set_network_weights(net);\n";
-    cout << " net.save(weights_file);\n\n";
+    fout << "def save_as_caffe_model(def_file, weights_file):\n";
+    fout << " with open(def_file, 'w') as f: f.write(str(make_netspec()));\n";
+    fout << " net = caffe.Net(def_file, caffe.TEST);\n";
+    fout << " set_network_weights(net);\n";
+    fout << " net.save(weights_file);\n\n";

     // -------------------------
     // -------------------------

-    cout << "def set_network_weights(net):\n";
-    cout << " # populate network parameters\n";
+    fout << "def set_network_weights(net):\n";
+    fout << " # populate network parameters\n";

     // iterate the layers starting with the input layer
     for (auto i = layers.rbegin(); i != layers.rend(); ++i)
     {
@@ -299,14 +305,14 @@ void convert_dlib_xml_to_cafffe_python_code(
             matrix<double> biases = trans(rowm(i->params, range(i->params.size()-num_filters, i->params.size()-1)));

             // main filter weights
-            cout << " p = "; print_as_np_array(cout, weights); cout << ";\n";
-            cout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
-            cout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";
+            fout << " p = "; print_as_np_array(fout, weights); fout << ";\n";
+            fout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
+            fout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";

             // biases
-            cout << " p = "; print_as_np_array(cout, biases); cout << ";\n";
-            cout << " p.shape = net.params['" << i->caffe_layer_name() << "'][1].data.shape;\n";
-            cout << " net.params['" << i->caffe_layer_name() << "'][1].data[:] = p;\n";
+            fout << " p = "; print_as_np_array(fout, biases); fout << ";\n";
+            fout << " p.shape = net.params['" << i->caffe_layer_name() << "'][1].data.shape;\n";
+            fout << " net.params['" << i->caffe_layer_name() << "'][1].data[:] = p;\n";
         }
         else if (i->detail_name == "fc")
         {
@@ -314,23 +320,23 @@ void convert_dlib_xml_to_cafffe_python_code(
             matrix<double> biases = rowm(i->params, i->params.nr()-1);

             // main filter weights
-            cout << " p = "; print_as_np_array(cout, weights); cout << ";\n";
-            cout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
-            cout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";
+            fout << " p = "; print_as_np_array(fout, weights); fout << ";\n";
+            fout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
+            fout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";

             // biases
-            cout << " p = "; print_as_np_array(cout, biases); cout << ";\n";
-            cout << " p.shape = net.params['" << i->caffe_layer_name() << "'][1].data.shape;\n";
-            cout << " net.params['" << i->caffe_layer_name() << "'][1].data[:] = p;\n";
+            fout << " p = "; print_as_np_array(fout, biases); fout << ";\n";
+            fout << " p.shape = net.params['" << i->caffe_layer_name() << "'][1].data.shape;\n";
+            fout << " net.params['" << i->caffe_layer_name() << "'][1].data[:] = p;\n";
         }
         else if (i->detail_name == "fc_no_bias")
         {
             matrix<double> weights = trans(i->params);

             // main filter weights
-            cout << " p = "; print_as_np_array(cout, weights); cout << ";\n";
-            cout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
-            cout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";
+            fout << " p = "; print_as_np_array(fout, weights); fout << ";\n";
+            fout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
+            fout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";
         }
         else if (i->detail_name == "affine_con" || i->detail_name == "affine_fc")
         {
@@ -339,14 +345,14 @@ void convert_dlib_xml_to_cafffe_python_code(
             matrix<double> beta = trans(rowm(i->params, range(dims, 2*dims-1)));

             // set gamma weights
-            cout << " p = "; print_as_np_array(cout, gamma); cout << ";\n";
-            cout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
-            cout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";
+            fout << " p = "; print_as_np_array(fout, gamma); fout << ";\n";
+            fout << " p.shape = net.params['" << i->caffe_layer_name() << "'][0].data.shape;\n";
+            fout << " net.params['" << i->caffe_layer_name() << "'][0].data[:] = p;\n";

             // set beta weights
-            cout << " p = "; print_as_np_array(cout, beta); cout << ";\n";
-            cout << " p.shape = net.params['" << i->caffe_layer_name() << "'][1].data.shape;\n";
-            cout << " net.params['" << i->caffe_layer_name() << "'][1].data[:] = p;\n";
+            fout << " p = "; print_as_np_array(fout, beta); fout << ";\n";
+            fout << " p.shape = net.params['" << i->caffe_layer_name() << "'][1].data.shape;\n";
+            fout << " net.params['" << i->caffe_layer_name() << "'][1].data[:] = p;\n";
         }
     }
@@ -356,8 +362,13 @@ void convert_dlib_xml_to_cafffe_python_code(
 int main(int argc, char** argv) try
 {
     cout.precision(9);

     // TODO, write out to multiple files or just process one file at a time.
     if (argc == 1)
     {
         cout << "Give this program an xml file generated by dlib::net_to_xml() and it will" << endl;
         cout << "convert it into a python file that outputs a caffe model containing the dlib model." << endl;
         return 0;
     }

     for (int i = 1; i < argc; ++i)
         convert_dlib_xml_to_cafffe_python_code(argv[i]);
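Taken together, the change means the converter now writes a standalone Python module next to the input XML, named after the XML file with a _dlib_to_caffe_model.py suffix, instead of printing everything to stdout. A hedged usage sketch, with hypothetical file names:

    # Run the built converter tool on an XML file produced by dlib::net_to_xml(), e.g.:
    #   convert_dlib_nets_to_caffe lenet.xml
    # which writes lenet_dlib_to_caffe_model.py; that module can then be used as:
    import lenet_dlib_to_caffe_model as converted

    # Writes the prototxt from make_netspec(), loads it as a caffe.Net, copies the
    # dlib weights via set_network_weights(), and saves the resulting .caffemodel.
    converted.save_as_caffe_model('lenet.prototxt', 'lenet.caffemodel')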