-- Copyright 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the license found in the
-- LICENSE file in the root directory of this source tree.

--[[
Child class of the nn.Sequential ConvNet module container.
1. Fill it with modules, e.g. convolutions, max-pooling, ConcatTables,
CAddTables, Sequentials, ...
2. Use :suggestInputSize to determine how large the spatial size of the
incoming InputBatches should be to get a desired output spatial size, i.e.
to produce output of size 7x7, the input to a 2x2 max-pooling layer should
be 14x14, and so on inductively, backwards through the network (see the
usage sketch below).
]]
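
--[[
Usage sketch, working backwards from a desired output spatial size. This is
a minimal illustration only: the MaxPooling constructor and its argument
order (dimension, poolSize, poolStride) are assumptions for illustration,
not a statement of this package's actual API.

  local scn = require 'sparseconvnet'
  local model = scn.Sequential()
    :add(scn.MaxPooling(2, 2, 2)) -- assumed 2D pooling, 2x2 window, stride 2
  -- Each module maps the requested output size back to the input size it
  -- needs; for 2x2/stride-2 pooling, a 7x7 output requires a 14x14 input.
  local inputSpatialSize = model:suggestInputSize(7) -- expect 14
]]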

return function(sparseconvnet)
  local Sequential, parent = torch.class(
    'sparseconvnet.Sequential', 'nn.Sequential', sparseconvnet)

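  -- Construct like nn.Sequential, then let sparseconvnet.shareShared link the
  -- container's shared state into the submodules.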
  function Sequential:__init(...)
    parent.__init(self, ...)
    sparseconvnet.shareShared(self)
  end

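  -- Append a module, then re-run shareShared so the new module also sees the
  -- container's shared state.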
  function Sequential:add(module)
    table.insert(self.modules, module)
    sparseconvnet.shareShared(self)
    return self
  end

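  -- Forward pass. If the input carries precomputed metadata, publish it in
  -- self.shared; once a non-precomputed input follows, clear the stale entry.
  -- Then feed the batch through each module in order.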
  function Sequential:updateOutput(input)
    local currentOutput = input
    if input.precomputed then
      self.hasSeenPrecomputedInput = true
      self.shared.precomputed = input.precomputed
    elseif self.hasSeenPrecomputedInput then
      self.shared.precomputed = nil
    end
    for i = 1, #self.modules do
      currentOutput = self:rethrowErrors(
        self.modules[i], i, 'updateOutput', currentOutput)
    end
    self.output = currentOutput
    return currentOutput
  end

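  -- Backward pass for gradients w.r.t. the input only. As in nn.Sequential,
  -- walk the modules right-to-left, giving each module the output of its
  -- predecessor as its input.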
  function Sequential:updateGradInput(input, gradOutput)
    local currentGradOutput = gradOutput
    local currentModule = self.modules[#self.modules]
    for i = #self.modules - 1, 1, -1 do
      local previousModule = self.modules[i]
      currentGradOutput = self:rethrowErrors(
        currentModule, i+1, 'updateGradInput',
        previousModule.output, currentGradOutput)
      currentModule = previousModule
    end
    currentGradOutput = self:rethrowErrors(
      currentModule, 1, 'updateGradInput', input, currentGradOutput)
    self.gradInput = currentGradOutput
    return currentGradOutput
  end

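  -- Accumulate gradients w.r.t. the parameters, scaled by `scale`, in the
  -- same right-to-left sweep.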
  function Sequential:accGradParameters(input, gradOutput, scale)
    scale = scale or 1
    local currentGradOutput = gradOutput
    local currentModule = self.modules[#self.modules]
    for i = #self.modules - 1, 1, -1 do
      local previousModule = self.modules[i]
      self:rethrowErrors(
        currentModule, i+1, 'accGradParameters',
        previousModule.output, currentGradOutput, scale)
      currentGradOutput = currentModule.gradInput
      currentModule = previousModule
    end
    self:rethrowErrors(currentModule, 1, 'accGradParameters', input,
      currentGradOutput, scale)
  end

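  -- Combined updateGradInput + accGradParameters in a single right-to-left
  -- sweep, mirroring nn.Sequential:backward.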
  function Sequential:backward(input, gradOutput, scale)
    scale = scale or 1
    local currentGradOutput = gradOutput
    local currentModule = self.modules[#self.modules]
    for i = #self.modules - 1, 1, -1 do
      local previousModule = self.modules[i]
      currentGradOutput = self:rethrowErrors(currentModule, i+1, 'backward',
        previousModule.output, currentGradOutput, scale)
      currentModule.gradInput = currentGradOutput
      currentModule = previousModule
    end
    currentGradOutput = self:rethrowErrors(currentModule, 1, 'backward', input,
      currentGradOutput, scale)
    self.gradInput = currentGradOutput
    return currentGradOutput
  end

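  -- Update the parameters in place with learning rate `lr` while
  -- backpropagating, right-to-left as above.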
  function Sequential:accUpdateGradParameters(input, gradOutput, lr)
    local currentGradOutput = gradOutput
    local currentModule = self.modules[#self.modules]
    for i = #self.modules - 1, 1, -1 do
      local previousModule = self.modules[i]
      self:rethrowErrors(currentModule, i+1, 'accUpdateGradParameters',
        previousModule.output, currentGradOutput, lr)
      currentGradOutput = currentModule.gradInput
      currentModule = previousModule
    end

    self:rethrowErrors(currentModule, 1, 'accUpdateGradParameters', input,
      currentGradOutput, lr)
  end

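  -- Convert every submodule to the given tensor type. Type conversion
  -- replaces the underlying tensors, so shared state must be re-shared
  -- afterwards.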
  function Sequential:type(tensortype)
    self._type = tensortype
    for i = 1, #self.modules do
      self.modules[i]:type(tensortype)
    end
    sparseconvnet.shareShared(self)
    return self
  end

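  -- Release cached state: forget precomputed metadata, empty the shared
  -- rules buffer, and clear every submodule.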
  function Sequential:clearState()
    self.shared.precomputed = nil
    if self.shared.rulesBuffer then
      self.shared.rulesBuffer:set()
    end
    self.output = nil
    self.gradInput = nil
    for _, m in ipairs(self.modules) do
      m:clearState()
    end
  end

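  -- Given a desired output spatial size n, work right-to-left through the
  -- modules to find the input spatial size that produces it.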
  function Sequential:suggestInputSize(n)
    for i = #self.modules, 1, -1 do
      n = self.modules[i]:suggestInputSize(n)
    end
    return n
  end
end