Unverified commit 4b2f8dab authored by Francisco Massa, committed by GitHub

Fix C++ lint (#1584)

parent 10a71116
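
The hunks below are mechanical reformatting from a lint/format pass: chained option-builder expressions (Options(...).stride(...).padding(...).bias(...)) are reflowed so the setters stay on a single line and the statement fits the column limit, breaking after an opening parenthesis only where needed. A minimal, self-contained sketch of the post-lint layout follows; the Options alias mirrors the local alias these files use for torch::nn::Conv2dOptions, and the values are illustrative, not the commit's own code:

// Illustrative sketch of the post-lint layout these hunks adopt; assumes
// libtorch is available. Not part of the commit itself.
#include <torch/torch.h>
#include <iostream>

using Options = torch::nn::Conv2dOptions;

int main() {
  torch::nn::Sequential features;
  // Post-lint style: break once after the constructor's opening parenthesis
  // and keep the chained option setters (stride/padding/bias) on one line.
  features->push_back(
      torch::nn::Conv2d(Options(3, 64, 7).stride(2).padding(3).bias(false)));
  auto out = features->forward(torch::randn({1, 3, 224, 224}));
  std::cout << out.sizes() << '\n';  // expected: [1, 64, 112, 112]
  return 0;
}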
@@ -99,10 +99,8 @@ DenseNetImpl::DenseNetImpl(
   features = torch::nn::Sequential();
   features->push_back(
       "conv0",
-      torch::nn::Conv2d(Options(3, num_init_features, 7)
-                            .stride(2)
-                            .padding(3)
-                            .bias(false)));
+      torch::nn::Conv2d(
+          Options(3, num_init_features, 7).stride(2).padding(3).bias(false)));
   features->push_back("norm0", torch::nn::BatchNorm(num_init_features));
   features->push_back("relu0", torch::nn::Functional(modelsimpl::relu_));
@@ -23,8 +23,7 @@ struct MNASNetInvertedResidualImpl : torch::nn::Module {
   auto mid = int64_t(input * expansion_factor);
   apply_residual = input == output && stride == 1;
-  layers->push_back(
-      torch::nn::Conv2d(Options(input, mid, 1).bias(false)));
+  layers->push_back(torch::nn::Conv2d(Options(input, mid, 1).bias(false)));
   layers->push_back(torch::nn::BatchNorm(
       torch::nn::BatchNormOptions(mid).momentum(bn_momentum)));
   layers->push_back(
@@ -39,8 +38,7 @@ struct MNASNetInvertedResidualImpl : torch::nn::Module {
       torch::nn::BatchNormOptions(mid).momentum(bn_momentum)));
   layers->push_back(
       torch::nn::Functional(torch::nn::Functional(modelsimpl::relu_)));
-  layers->push_back(
-      torch::nn::Conv2d(Options(mid, output, 1).bias(false)));
+  layers->push_back(torch::nn::Conv2d(Options(mid, output, 1).bias(false)));
   layers->push_back(torch::nn::BatchNorm(
       torch::nn::BatchNormOptions(output).momentum(bn_momentum)));
@@ -128,8 +126,8 @@ void MNASNetImpl::_initialize_weights() {
 MNASNetImpl::MNASNetImpl(double alpha, int64_t num_classes, double dropout) {
   auto depths = scale_depths({24, 40, 80, 96, 192, 320}, alpha);
-  layers->push_back(torch::nn::Conv2d(
-      Options(3, 32, 3).padding(1).stride(2).bias(false)));
+  layers->push_back(
+      torch::nn::Conv2d(Options(3, 32, 3).padding(1).stride(2).bias(false)));
   layers->push_back(torch::nn::BatchNorm(
       torch::nn::BatchNormOptions(32).momentum(BN_MOMENTUM)));
   layers->push_back(torch::nn::Functional(modelsimpl::relu_));
@@ -138,8 +136,8 @@ MNASNetImpl::MNASNetImpl(double alpha, int64_t num_classes, double dropout) {
   layers->push_back(torch::nn::BatchNorm(
       torch::nn::BatchNormOptions(32).momentum(BN_MOMENTUM)));
   layers->push_back(torch::nn::Functional(modelsimpl::relu_));
-  layers->push_back(torch::nn::Conv2d(
-      Options(32, 16, 1).padding(0).stride(1).bias(false)));
+  layers->push_back(
+      torch::nn::Conv2d(Options(32, 16, 1).padding(0).stride(1).bias(false)));
   layers->push_back(torch::nn::BatchNorm(
       torch::nn::BatchNormOptions(16).momentum(BN_MOMENTUM)));
@@ -124,8 +124,8 @@ ResNetImpl<Block>::ResNetImpl(
     : groups(groups),
       base_width(width_per_group),
       inplanes(64),
-      conv1(torch::nn::Conv2dOptions(3, 64, 7).stride(2).padding(3).bias(
-          false)),
+      conv1(
+          torch::nn::Conv2dOptions(3, 64, 7).stride(2).padding(3).bias(false)),
       bn1(64),
       layer1(_make_layer(64, layers[0])),
       layer2(_make_layer(128, layers[1], 2)),
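
Every hunk in this commit changes layout only, which is consistent with an automated formatter pass rather than hand edits. As a hypothetical sketch of a clang-format configuration that would yield this layout (the repository ships its own .clang-format, whose actual settings may differ):

# Hypothetical .clang-format sketch, not the project's real file.
BasedOnStyle: Google
ColumnLimit: 80
IndentWidth: 2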