Commit 462d61ef authored by Joachim's avatar Joachim
Browse files

fixed backward pass in cont layer to accumulate gradients this will pass the...

Fixed backward pass in cont layer to accumulate gradients. This will pass the layer test now. Also removed compile warnings and changed some comments.
parent 4df095af
...@@ -355,14 +355,16 @@ namespace dlib ...@@ -355,14 +355,16 @@ namespace dlib
) {} ) {}
void setup( void setup(
const tensor& data, const tensor& data, /* not used but required for interface */
const tensor& filters, const tensor& filters, /* not used but required for interface */
int stride_y, int stride_y,
int stride_x, int stride_x,
int padding_y, int padding_y,
int padding_x int padding_x
) )
{ {
(void)data; /* silence compiler */
(void)filters; /* silence compiler */
last_stride_y = stride_y; last_stride_y = stride_y;
last_stride_x = stride_x; last_stride_x = stride_x;
last_padding_y = padding_y; last_padding_y = padding_y;
......
...@@ -454,9 +454,9 @@ namespace dlib ...@@ -454,9 +454,9 @@ namespace dlib
temp.copy_size(sub.get_gradient_input()); temp.copy_size(sub.get_gradient_input());
auto filt = filters(params,0); auto filt = filters(params,0);
conv(temp,gradient_input, filt); conv(temp,gradient_input, filt);
tt::copy_tensor(sub.get_gradient_input(),0,temp,0,sub.get_gradient_input().k()); // need to add the new gradients on top of the previous ones
tt::add(1,sub.get_gradient_input(),1,temp);
// no point computing the parameter gradients if they won't be used. // no point computing the parameter gradients if they won't be used.
if (learning_rate_multiplier != 0) if (learning_rate_multiplier != 0)
{ {
auto filt = filters(params_grad,0); auto filt = filters(params_grad,0);
...@@ -582,8 +582,6 @@ namespace dlib ...@@ -582,8 +582,6 @@ namespace dlib
double bias_learning_rate_multiplier; double bias_learning_rate_multiplier;
double bias_weight_decay_multiplier; double bias_weight_decay_multiplier;
// These are here only because older versions of con (which you might encounter
// serialized to disk) used different padding settings.
int padding_y_; int padding_y_;
int padding_x_; int padding_x_;
......
...@@ -850,16 +850,17 @@ namespace dlib ...@@ -850,16 +850,17 @@ namespace dlib
WHAT THIS OBJECT REPRESENTS WHAT THIS OBJECT REPRESENTS
This is an implementation of the EXAMPLE_COMPUTATIONAL_LAYER_ interface This is an implementation of the EXAMPLE_COMPUTATIONAL_LAYER_ interface
defined above. In particular, it defines a convolution layer that takes an defined above. In particular, it defines a transposed convolution layer
input tensor (nominally representing an image) and convolves it with a set that takes an input tensor (nominally representing an image) and
of filters and then outputs the results. transpose convolves (deconvolves) it with a set of filters and then outputs the results.
This is basically a convolutional layer with reversed forward/backward passes. This is basically a convolutional layer with reversed forward/backward passes.
The dimensions of the tensors output by this layer are as follows (letting The dimensions of the tensors output by this layer are as follows (letting
IN be the input tensor and OUT the output tensor): IN be the input tensor and OUT the output tensor):
- OUT.num_samples() == IN.num_samples() - OUT.num_samples() == IN.num_samples()
- OUT.k() == num_filters() - OUT.k() == num_filters()
- OUT.nr() == 1+(IN.nr() + 2*padding_y() - nr())/stride_y() - OUT.nr() == stride_y()*(IN.nr()-1) + nr() - 2*padding_y()
- OUT.nc() == 1+(IN.nc() + 2*padding_x() - nc())/stride_x() - OUT.nc() == stride_x()*(IN.nc()-1) + nc() - 2*padding_x()
!*/ !*/
public: public:
......
...@@ -1410,6 +1410,18 @@ namespace ...@@ -1410,6 +1410,18 @@ namespace
auto res = test_layer(l); auto res = test_layer(l);
DLIB_TEST_MSG(res, res); DLIB_TEST_MSG(res, res);
} }
{
print_spinner();
cont_<3,3,3,1,1> l;
auto res = test_layer(l);
DLIB_TEST_MSG(res, res);
}
{
print_spinner();
cont_<3,2,2,2,2> l;
auto res = test_layer(l);
DLIB_TEST_MSG(res, res);
}
{ {
print_spinner(); print_spinner();
con_<3,2,2,2,2> l; con_<3,2,2,2,2> l;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment