Unverified commit 21f70c17, authored by YosuaMichael, committed by GitHub

Remove cpp model in v0.14 due to deprecation (#6632)

* Remove cpp models

* Also remove the whole models folder in csrc

* Cleanup test for cpp model
Parent: c4c28dff
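For reference, these models were consumed through the libtorch C++ frontend. A minimal usage sketch of the removed API follows; it assumes a pre-0.14 torchvision build where the C++ models still existed, and the in-tree include path (an installed copy may differ):

// Sketch only: compiles against torchvision < 0.14, where the C++ models
// still existed; the include path below is the in-tree one.
#include <iostream>
#include <torch/torch.h>
#include "torchvision/csrc/models/vgg.h"

int main() {
  vision::models::VGG11 model(/*num_classes=*/1000);
  model->eval();
  torch::NoGradGuard no_grad;
  auto out = model->forward(torch::rand({1, 3, 224, 224}));
  std::cout << out.sizes() << std::endl; // expected: [1, 1000]
}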
// File: torchvision/csrc/models/shufflenetv2.h (removed by this commit)
#pragma once

#include <torch/nn.h>

#include "../macros.h"

namespace vision {
namespace models {

struct VISION_API ShuffleNetV2Impl : torch::nn::Module {
  std::vector<int64_t> _stage_out_channels;
  torch::nn::Sequential conv1{nullptr}, stage2, stage3, stage4, conv5{nullptr};
  torch::nn::Linear fc{nullptr};

  ShuffleNetV2Impl(
      const std::vector<int64_t>& stage_repeats,
      const std::vector<int64_t>& stage_out_channels,
      int64_t num_classes = 1000);

  torch::Tensor forward(torch::Tensor x);
};

struct VISION_API ShuffleNetV2_x0_5Impl : ShuffleNetV2Impl {
  explicit ShuffleNetV2_x0_5Impl(int64_t num_classes = 1000);
};

struct VISION_API ShuffleNetV2_x1_0Impl : ShuffleNetV2Impl {
  explicit ShuffleNetV2_x1_0Impl(int64_t num_classes = 1000);
};

struct VISION_API ShuffleNetV2_x1_5Impl : ShuffleNetV2Impl {
  explicit ShuffleNetV2_x1_5Impl(int64_t num_classes = 1000);
};

struct VISION_API ShuffleNetV2_x2_0Impl : ShuffleNetV2Impl {
  explicit ShuffleNetV2_x2_0Impl(int64_t num_classes = 1000);
};

TORCH_MODULE(ShuffleNetV2);
TORCH_MODULE(ShuffleNetV2_x0_5);
TORCH_MODULE(ShuffleNetV2_x1_0);
TORCH_MODULE(ShuffleNetV2_x1_5);
TORCH_MODULE(ShuffleNetV2_x2_0);

} // namespace models
} // namespace vision
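The header above only declares the modules; the channel-shuffle op that defines the architecture lives in the matching .cpp, which is not shown in this diff. A rough standalone sketch of that op, assuming NCHW input (the free-function form is illustrative):

#include <torch/torch.h>

// Reshape channels into (groups, channels/groups), transpose, and flatten
// back, interleaving channels across groups.
torch::Tensor channel_shuffle(torch::Tensor x, int64_t groups) {
  auto s = x.sizes();
  int64_t n = s[0], c = s[1], h = s[2], w = s[3];
  return x.view({n, groups, c / groups, h, w})
      .transpose(1, 2)
      .contiguous()
      .view({n, c, h, w});
}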
#include "squeezenet.h"
#include "modelsimpl.h"
namespace vision {
namespace models {
struct Fire : torch::nn::Module {
torch::nn::Conv2d squeeze, expand1x1, expand3x3;
Fire(
int64_t inplanes,
int64_t squeeze_planes,
int64_t expand1x1_planes,
int64_t expand3x3_planes)
: squeeze(torch::nn::Conv2dOptions(inplanes, squeeze_planes, 1)),
expand1x1(
torch::nn::Conv2dOptions(squeeze_planes, expand1x1_planes, 1)),
expand3x3(torch::nn::Conv2dOptions(squeeze_planes, expand3x3_planes, 3)
.padding(1)) {
register_module("squeeze", squeeze);
register_module("expand1x1", expand1x1);
register_module("expand3x3", expand3x3);
}
torch::Tensor forward(torch::Tensor x) {
x = torch::relu(squeeze->forward(x));
return torch::cat(
{torch::relu(expand1x1->forward(x)),
torch::relu(expand3x3->forward(x))},
1);
}
};
SqueezeNetImpl::SqueezeNetImpl(double version, int64_t num_classes)
: num_classes(num_classes) {
if (modelsimpl::double_compare(version, 1.0)) {
features = torch::nn::Sequential(
torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 96, 7).stride(2)),
torch::nn::Functional(modelsimpl::relu_),
torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true),
Fire(512, 64, 256, 256));
} else if (modelsimpl::double_compare(version, 1.1)) {
features = torch::nn::Sequential(
torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 64, 3).stride(2)),
torch::nn::Functional(modelsimpl::relu_),
torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256));
} else
TORCH_CHECK(
false,
"Unsupported SqueezeNet version ",
version,
". 1_0 or 1_1 expected");
// Final convolution is initialized differently from the rest
auto final_conv =
torch::nn::Conv2d(torch::nn::Conv2dOptions(512, num_classes, 1));
classifier = torch::nn::Sequential(
torch::nn::Dropout(0.5),
final_conv,
torch::nn::Functional(modelsimpl::relu_),
torch::nn::Functional(modelsimpl::adaptive_avg_pool2d, 1));
register_module("features", features);
register_module("classifier", classifier);
for (auto& module : modules(/*include_self=*/false))
if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) {
if (M == final_conv.get())
torch::nn::init::normal_(M->weight, 0.0, 0.01);
else
torch::nn::init::kaiming_uniform_(M->weight);
if (M->options.bias())
torch::nn::init::constant_(M->bias, 0);
}
modelsimpl::deprecation_warning();
}
torch::Tensor SqueezeNetImpl::forward(torch::Tensor x) {
x = features->forward(x);
x = classifier->forward(x);
return x.view({x.size(0), -1});
}
SqueezeNet1_0Impl::SqueezeNet1_0Impl(int64_t num_classes)
: SqueezeNetImpl(1.0, num_classes) {}
SqueezeNet1_1Impl::SqueezeNet1_1Impl(int64_t num_classes)
: SqueezeNetImpl(1.1, num_classes) {}
} // namespace models
} // namespace vision
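The Fire stages chain because each module's output channel count, expand1x1_planes + expand3x3_planes after the concat in forward, equals the next module's inplanes: Fire(96, 16, 64, 64) emits 64 + 64 = 128 channels, which feeds Fire(128, 16, 64, 64). A minimal shape check of that arithmetic using bare functional ops (the weights are random placeholders, not trained parameters):

#include <iostream>
#include <torch/torch.h>

int main() {
  namespace F = torch::nn::functional;
  auto x = torch::rand({1, 96, 56, 56});
  // 1x1 squeeze to 16 channels, then parallel 1x1 and 3x3 expands of 64 each.
  auto sq = torch::relu(F::conv2d(x, torch::rand({16, 96, 1, 1})));
  auto e1 = torch::relu(F::conv2d(sq, torch::rand({64, 16, 1, 1})));
  auto e3 = torch::relu(F::conv2d(
      sq, torch::rand({64, 16, 3, 3}), F::Conv2dFuncOptions().padding(1)));
  auto y = torch::cat({e1, e3}, 1);
  std::cout << y.sizes() << std::endl; // expected: [1, 128, 56, 56]
}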
// File: torchvision/csrc/models/squeezenet.h (removed by this commit)
#pragma once

#include <torch/nn.h>

#include "../macros.h"

namespace vision {
namespace models {

struct VISION_API SqueezeNetImpl : torch::nn::Module {
  int64_t num_classes;
  torch::nn::Sequential features{nullptr}, classifier{nullptr};

  explicit SqueezeNetImpl(double version = 1.0, int64_t num_classes = 1000);

  torch::Tensor forward(torch::Tensor x);
};

// SqueezeNet model architecture from the "SqueezeNet: AlexNet-level
// accuracy with 50x fewer parameters and <0.5MB model size"
// <https://arxiv.org/abs/1602.07360> paper.
struct VISION_API SqueezeNet1_0Impl : SqueezeNetImpl {
  explicit SqueezeNet1_0Impl(int64_t num_classes = 1000);
};

// SqueezeNet 1.1 model from the official SqueezeNet repo
// <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>.
// SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
// than SqueezeNet 1.0, without sacrificing accuracy.
struct VISION_API SqueezeNet1_1Impl : SqueezeNetImpl {
  explicit SqueezeNet1_1Impl(int64_t num_classes = 1000);
};

TORCH_MODULE(SqueezeNet);
TORCH_MODULE(SqueezeNet1_0);
TORCH_MODULE(SqueezeNet1_1);

} // namespace models
} // namespace vision
#include "vgg.h"
#include <unordered_map>
#include "modelsimpl.h"
namespace vision {
namespace models {
torch::nn::Sequential makeLayers(
const std::vector<int>& cfg,
bool batch_norm = false) {
torch::nn::Sequential seq;
auto channels = 3;
for (const auto& V : cfg) {
if (V <= -1)
seq->push_back(torch::nn::Functional(modelsimpl::max_pool2d, 2, 2));
else {
seq->push_back(torch::nn::Conv2d(
torch::nn::Conv2dOptions(channels, V, 3).padding(1)));
if (batch_norm)
seq->push_back(torch::nn::BatchNorm2d(V));
seq->push_back(torch::nn::Functional(modelsimpl::relu_));
channels = V;
}
}
return seq;
}
void VGGImpl::_initialize_weights() {
for (auto& module : modules(/*include_self=*/false)) {
if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) {
torch::nn::init::kaiming_normal_(
M->weight,
/*a=*/0,
torch::kFanOut,
torch::kReLU);
torch::nn::init::constant_(M->bias, 0);
} else if (
auto M = dynamic_cast<torch::nn::BatchNorm2dImpl*>(module.get())) {
torch::nn::init::constant_(M->weight, 1);
torch::nn::init::constant_(M->bias, 0);
} else if (auto M = dynamic_cast<torch::nn::LinearImpl*>(module.get())) {
torch::nn::init::normal_(M->weight, 0, 0.01);
torch::nn::init::constant_(M->bias, 0);
}
}
}
VGGImpl::VGGImpl(
const torch::nn::Sequential& features,
int64_t num_classes,
bool initialize_weights) {
classifier = torch::nn::Sequential(
torch::nn::Linear(512 * 7 * 7, 4096),
torch::nn::Functional(modelsimpl::relu_),
torch::nn::Dropout(),
torch::nn::Linear(4096, 4096),
torch::nn::Functional(modelsimpl::relu_),
torch::nn::Dropout(),
torch::nn::Linear(4096, num_classes));
this->features = features;
register_module("features", this->features);
register_module("classifier", classifier);
if (initialize_weights)
_initialize_weights();
modelsimpl::deprecation_warning();
}
torch::Tensor VGGImpl::forward(torch::Tensor x) {
x = features->forward(x);
x = torch::adaptive_avg_pool2d(x, {7, 7});
x = x.view({x.size(0), -1});
x = classifier->forward(x);
return x;
}
// clang-format off
static std::unordered_map<char, std::vector<int>> cfgs = {
{'A', {64, -1, 128, -1, 256, 256, -1, 512, 512, -1, 512, 512, -1}},
{'B', {64, 64, -1, 128, 128, -1, 256, 256, -1, 512, 512, -1, 512, 512, -1}},
{'D', {64, 64, -1, 128, 128, -1, 256, 256, 256, -1, 512, 512, 512, -1, 512, 512, 512, -1}},
{'E', {64, 64, -1, 128, 128, -1, 256, 256, 256, 256, -1, 512, 512, 512, 512, -1, 512, 512, 512, 512, -1}}};
// clang-format on
VGG11Impl::VGG11Impl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['A']), num_classes, initialize_weights) {}
VGG13Impl::VGG13Impl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['B']), num_classes, initialize_weights) {}
VGG16Impl::VGG16Impl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['D']), num_classes, initialize_weights) {}
VGG19Impl::VGG19Impl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['E']), num_classes, initialize_weights) {}
VGG11BNImpl::VGG11BNImpl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['A'], true), num_classes, initialize_weights) {}
VGG13BNImpl::VGG13BNImpl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['B'], true), num_classes, initialize_weights) {}
VGG16BNImpl::VGG16BNImpl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['D'], true), num_classes, initialize_weights) {}
VGG19BNImpl::VGG19BNImpl(int64_t num_classes, bool initialize_weights)
: VGGImpl(makeLayers(cfgs['E'], true), num_classes, initialize_weights) {}
} // namespace models
} // namespace vision
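Counting weight layers in a config recovers the model names: configuration 'A' has 8 conv entries, and with the 3 linear layers of the shared classifier that gives the 11 weight layers of VGG11. A quick check:

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // cfg 'A' copied from the table above; -1 entries are max-pool markers.
  std::vector<int> cfgA = {64, -1, 128, -1, 256, 256, -1, 512, 512, -1, 512, 512, -1};
  auto convs = std::count_if(cfgA.begin(), cfgA.end(), [](int v) { return v > 0; });
  std::cout << convs + 3 << std::endl; // 8 convs + 3 linears = 11
}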
// File: torchvision/csrc/models/vgg.h (removed by this commit)
#pragma once

#include <torch/nn.h>

#include "../macros.h"

namespace vision {
namespace models {

struct VISION_API VGGImpl : torch::nn::Module {
  torch::nn::Sequential features{nullptr}, classifier{nullptr};

  void _initialize_weights();

  explicit VGGImpl(
      const torch::nn::Sequential& features,
      int64_t num_classes = 1000,
      bool initialize_weights = true);

  torch::Tensor forward(torch::Tensor x);
};

// VGG 11-layer model (configuration "A")
struct VISION_API VGG11Impl : VGGImpl {
  explicit VGG11Impl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

// VGG 13-layer model (configuration "B")
struct VISION_API VGG13Impl : VGGImpl {
  explicit VGG13Impl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

// VGG 16-layer model (configuration "D")
struct VISION_API VGG16Impl : VGGImpl {
  explicit VGG16Impl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

// VGG 19-layer model (configuration "E")
struct VISION_API VGG19Impl : VGGImpl {
  explicit VGG19Impl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

// VGG 11-layer model (configuration "A") with batch normalization
struct VISION_API VGG11BNImpl : VGGImpl {
  explicit VGG11BNImpl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

// VGG 13-layer model (configuration "B") with batch normalization
struct VISION_API VGG13BNImpl : VGGImpl {
  explicit VGG13BNImpl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

// VGG 16-layer model (configuration "D") with batch normalization
struct VISION_API VGG16BNImpl : VGGImpl {
  explicit VGG16BNImpl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

// VGG 19-layer model (configuration "E") with batch normalization
struct VISION_API VGG19BNImpl : VGGImpl {
  explicit VGG19BNImpl(
      int64_t num_classes = 1000,
      bool initialize_weights = true);
};

TORCH_MODULE(VGG);
TORCH_MODULE(VGG11);
TORCH_MODULE(VGG13);
TORCH_MODULE(VGG16);
TORCH_MODULE(VGG19);
TORCH_MODULE(VGG11BN);
TORCH_MODULE(VGG13BN);
TORCH_MODULE(VGG16BN);
TORCH_MODULE(VGG19BN);

} // namespace models
} // namespace vision
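TORCH_MODULE is the stock libtorch macro: TORCH_MODULE(VGG16) defines VGG16 as a ModuleHolder wrapping VGG16Impl, so the handles declared above copy with shared-pointer semantics. A small demonstration with a built-in module, to avoid depending on the removed headers:

#include <cassert>
#include <torch/torch.h>

int main() {
  torch::nn::Linear a(4, 2);
  torch::nn::Linear b = a; // b aliases the same LinearImpl as a
  b->weight.data().zero_();
  assert(a->weight.abs().sum().item<float>() == 0.0f); // visible through a
}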