import numpy as np
from scipy import signal
import time
import pylab
from PIL import Image
class Conv(object):
def __init__(self, n_input_channels, n_output_channels,
kr_size,
activation_fn='relu'):
fan_in = n_input_channels * kr_size**2
fan_out = n_output_channels * kr_size**2 / 4 # 4 is for pool size
        # Glorot-style init: the std dev is the square root of
        # 2 / (fan_in + fan_out), not the variance itself.
        sigma = np.sqrt(2.0 / (fan_in + fan_out))
W = np.random.normal(loc=0.0,
scale=sigma,
size=(n_output_channels,
n_input_channels,
kr_size, kr_size))
# b = np.random.normal(loc=0.001,
# scale=0.0001,
# size=(n_output_channels))
b = np.zeros((n_output_channels,))
dE_dW = np.ndarray(W.shape)
dE_db = np.ndarray(b.shape)
self.name = 'conv'
self.params = [W, b]
self.gradParams = [dE_dW, dE_db]
self.accumulated_gradParams = []
self.acc_no = 0
self.n_input_channels = n_input_channels
self.n_output_channels = n_output_channels
self.kr_size = kr_size
self.momentum_init = False
self.i_t = 0
if activation_fn == 'relu':
self.activation_fn = lambda x: x * (x > 0)
self.activation_derivative = lambda x: 1. * (x > 0)
elif activation_fn == 'tanh':
self.activation_fn = np.tanh
self.activation_derivative = lambda x: 1.0 - np.tanh(x)**2
else:
raise NotImplementedError()
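    # Note on the initialisation above: fan_in counts the connections
    # feeding each output activation (n_input_channels * kr_size**2),
    # fan_out divides by 4 to account for 2x2 pooling downstream, and
    # the weights are drawn with a Glorot-style standard deviation of
    # sqrt(2 / (fan_in + fan_out)).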
def forward(self, in_fmap):
        '''
        Takes an input feature map of shape
        (n_input_channels, height, width) and returns its convolution
        with the layer's kernels, followed by the activation function.
        Note that scipy's convolve2d flips the kernel along both axes
        before sliding it, unlike plain cross-correlation.
        '''
start_time = time.time()
W, b = self.params
assert in_fmap.shape[0] == self.n_input_channels
self.in_fmap = in_fmap
out_shape = signal.convolve2d(in_fmap[0], W[0][0], mode='valid').shape
out_shape = (self.n_output_channels,) + out_shape
out_fmap = np.zeros(out_shape)
deriv_out = np.ndarray(out_shape)
        for i in range(self.n_output_channels):
            for j in range(self.n_input_channels):
                out_fmap[i] += signal.convolve2d(in_fmap[j],
                                                 W[i][j],
                                                 mode='valid')
            # Add the bias once per output channel, then compute the
            # activation derivative on the pre-activation values.
            out_fmap[i] += b[i]
            deriv_out[i] = self.activation_derivative(out_fmap[i])
            out_fmap[i] = self.activation_fn(out_fmap[i])
self.deriv_out = deriv_out
self.time_taken = time.time() - start_time
return out_fmap
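    # Illustrative sanity check (a sketch, not part of the original layer
    # API): convolve2d flips the kernel along both axes, so it should
    # agree with correlate2d applied to an explicitly flipped kernel.
    def _check_flip_equivalence(self):
        x = np.random.rand(self.kr_size + 4, self.kr_size + 4)
        k = self.params[0][0][0]
        conv = signal.convolve2d(x, k, mode='valid')
        corr = signal.correlate2d(x, np.flipud(np.fliplr(k)), mode='valid')
        return np.allclose(conv, corr)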
def accumulate_grads(self):
self.acc_no += 1
        if self.accumulated_gradParams == []:
            self.accumulated_gradParams = [np.copy(g)
                                           for g in self.gradParams]
        else:
            self.accumulated_gradParams = [np.add(acc, g)
                                           for acc, g in
                                           zip(self.accumulated_gradParams,
                                               self.gradParams)]
def backward(self, deltas):
        '''
        Takes the deltas (dE_dY, the gradient of the error with respect
        to this layer's output) from the succeeding layer and returns
        the deltas with respect to this layer's input.
        '''
W = self.params[0]
dE_dW, dE_db = self.gradParams
dE_dX = np.zeros(self.in_fmap.shape)
def flip_kr(kernel):
return np.flipud(np.fliplr(kernel))
        for out_ch in range(self.n_output_channels):
            # Gradient at the pre-activation: incoming deltas chained
            # through the activation derivative.
            gdY = np.multiply(deltas[out_ch], self.deriv_out[out_ch])
            for in_ch in range(self.n_input_channels):
                # dE_dX: full convolution of the pre-activation gradient
                # with the kernel flipped back.
                dE_dX[in_ch] += signal.convolve2d(gdY,
                                                  flip_kr(W[out_ch][in_ch]))
                # Calculating dE_dW
                dE_dW[out_ch][in_ch] = flip_kr(
                    signal.convolve2d(self.in_fmap[in_ch],
                                      flip_kr(gdY),
                                      mode='valid'))
            # Calculating dE_db
            dE_db[out_ch] = np.sum(gdY)
self.gradParams = [dE_dW, dE_db]
self.accumulate_grads()
return dE_dX
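    # A minimal finite-difference gradient sketch (an illustrative helper,
    # not part of the original API): with E = sum(output), perturb one
    # weight and compare the numerical slope with dE_dW from backward().
    # Agreement is approximate, especially near ReLU kinks, and note that
    # backward() also accumulates gradients as a side effect.
    def _grad_check_one_weight(self, in_fmap, eps=1e-5):
        out = self.forward(in_fmap)
        self.backward(np.ones(out.shape))  # dE_dY = 1 => E = sum(out)
        analytic = self.gradParams[0][0, 0, 0, 0]
        W = self.params[0]
        W[0, 0, 0, 0] += eps
        loss_plus = np.sum(self.forward(in_fmap))
        W[0, 0, 0, 0] -= 2 * eps
        loss_minus = np.sum(self.forward(in_fmap))
        W[0, 0, 0, 0] += eps  # restore the weight
        numeric = (loss_plus - loss_minus) / (2 * eps)
        return analytic, numeric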
    def updateParams(self, hyperParams):
        '''
        Adam update using bias-corrected first- and second-moment
        estimates of the averaged accumulated gradients.
        '''
        alpha, beta1, beta2 = hyperParams
        epsilon = 1e-8  # numerical-stability constant (standard Adam value)
        self.i_t += 1
        # Average the gradients accumulated since the last update.
        self.gradParams = [g / self.acc_no
                           for g in self.accumulated_gradParams]
        self.acc_no = 0
        self.accumulated_gradParams = []
        if not self.momentum_init:
            self.momentum_init = True
            self.momentum1 = [np.copy(g) for g in self.gradParams]
            self.momentum2 = [np.square(g) for g in self.gradParams]
        else:
            self.momentum1 = [beta1 * m + (1 - beta1) * g
                              for m, g in zip(self.momentum1,
                                              self.gradParams)]
            self.momentum2 = [beta2 * m + (1 - beta2) * np.square(g)
                              for m, g in zip(self.momentum2,
                                              self.gradParams)]
        # Bias-corrected moment estimates.
        m_t = [m / (1 - beta1**self.i_t) for m in self.momentum1]
        v_t = [v / (1 - beta2**self.i_t) for v in self.momentum2]
        self.params = [theta - alpha * m / np.sqrt(v + epsilon)
                       for theta, m, v in zip(self.params, m_t, v_t)]
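# A minimal end-to-end sketch of one training step (illustrative only;
# the helper name, input shapes, and hyperparameters below are
# assumptions, and the "loss" simply drives the output towards zero):
def _demo_training_step():
    layer = Conv(n_input_channels=1, n_output_channels=2, kr_size=3)
    x = np.random.rand(1, 8, 8)
    y = layer.forward(x)                     # output shape (2, 6, 6)
    deltas = y                               # dE_dY for E = 0.5 * sum(y**2)
    layer.backward(deltas)
    layer.updateParams([0.001, 0.9, 0.999])  # alpha, beta1, beta2 (Adam)
    return np.sum(np.square(layer.forward(x)))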
if __name__ == '__main__':
n_input_channels = 3
n_output_channels = 3
c = Conv(n_input_channels, n_output_channels, 3)
# sample_deltas = np.ndarray((n_output_channels))
# input_map = np.ones((n_input_channels, 28, 28))
# output = c.forward(input_map)
# # print output.shape
# sample_deltas = np.ones(output.shape)
# dE_dX = c.backward(sample_deltas * 10e-7)
# dE_dX = c.backward(sample_deltas * 10e-3)
# dE_dX = c.backward(sample_deltas * 10e-2)
# dE_dX = c.backward(sample_deltas * 10e-7)
# # print np.sum(dE_dX)
# c.updateParams([0.01, 0.9, 0.99])
    img = Image.open('3wolfmoon.jpg')
    # dimensions are (height, width, channel)
    img = np.asarray(img, dtype='float64') / 256.
    # rearrange into a channels-first tensor of shape (3, height, width);
    # the transpose alone gives this, no hard-coded reshape is needed
    img_ = img.transpose(2, 0, 1)
filtered_img = c.forward(img_)
    # plot the original image and the first two output channels
    pylab.subplot(1, 3, 1); pylab.axis('off'); pylab.imshow(img)
    pylab.gray()
    # the output feature map has shape (n_output_channels, height, width),
    # so index the first dimension to pick a channel:
    pylab.subplot(1, 3, 2); pylab.axis('off'); pylab.imshow(filtered_img[0, :, :])
    pylab.subplot(1, 3, 3); pylab.axis('off'); pylab.imshow(filtered_img[1, :, :])
pylab.show()