separate cost computation from backprop
parent 79c2cbe69b
commit 4bc26caef6
mlp.py | 42 lines changed
@@ -342,27 +342,19 @@ class MultiLayerPerceptron(object):
         self.propagate()
         return self._A[self._L]
 
-    def back_propagation(self, get_cost_function=False):
-        """Back propagation
+    def compute_cost_function(self):
+        """Compute cost function:
 
-        :param get_cost_function: if True the cost function J
-        will be computed and returned.
         J = -1/m((Y(A.T)) + (1-Y)(A.T))
-        if self._regularization will add:
+        if self._regularization is True will add:
         J += lamda/(2*m)*Wnorm
-        :return: the cost function if get_cost_function==True else None
+        :return: the cost function result
 
         """
-        J = None
         L = self._L
         m = self._m
-        dW = [None] + [None] * self._L
-        db = [None] + [None] * self._L
-        dA = [None] + [None] * self._L
-        dA[L] = -self._Y/self._A[L] + ((1-self._Y)/(1-self._A[L]))
-
+        J = None
         # Compute cost function
-        if get_cost_function:
         if self._softmax:
             # case of softmax multi-class
             loss = -np.sum(self._Y * np.log(self._A[L]), axis=0)
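For context, the `J = -1/m((Y(A.T)) + (1-Y)(A.T))` line kept in the docstring elides the logarithms: judging by the softmax branch (`-np.sum(self._Y * np.log(self._A[L]))`) and the regularization term below it, the quantity the method returns is the cross-entropy cost plus an optional L2 penalty lambda/(2m) * sum of weight norms. A minimal standalone sketch of that computation, assuming `Y` and `A` are NumPy arrays of shape (1, m); the helper name and signature are illustrative, not part of this commit:

import numpy as np

def cross_entropy_cost(Y, A, weights=None, lambda_regul=0.0):
    """J = -1/m * sum(Y*log(A) + (1-Y)*log(1-A)), plus the optional
    L2 term lambda/(2m) * sum of per-layer weight norms."""
    m = Y.shape[1]
    J = -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m
    if weights is not None and lambda_regul:
        # same per-layer norm accumulation as the wnorms loop in the diff
        J += lambda_regul / (2 * m) * sum(np.linalg.norm(w) for w in weights)
    return float(J)

# e.g. cross_entropy_cost(np.array([[1., 0.]]), np.array([[0.9, 0.2]])) ~= 0.164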
@@ -376,6 +368,16 @@ class MultiLayerPerceptron(object):
             for w in self._W[1:]:
                 wnorms += np.linalg.norm(w)
             J += self._lambda_regul/(2*m) * wnorms
+        return J
 
+    def back_propagation(self, get_cost_function=False):
+        """Back propagation"""
+        L = self._L
+        m = self._m
+        dW = [None] + [None] * self._L
+        db = [None] + [None] * self._L
+        dA = [None] + [None] * self._L
+        dA[L] = -self._Y/self._A[L] + ((1-self._Y)/(1-self._A[L]))
+
         # Compute weights derivatives
         for l in range(L, 0, -1):
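The seed gradient `dA[L] = -self._Y/self._A[L] + ((1-self._Y)/(1-self._A[L]))`, which `back_propagation` now recomputes for itself instead of sharing with the cost code, is the derivative of that per-example cross-entropy loss with respect to the output activation. A quick finite-difference check of the identity; the `bce` helper is illustrative, not part of the diff:

import numpy as np

def bce(a, y):
    # per-example binary cross-entropy, matching the cost sketched above
    return -(y * np.log(a) + (1 - y) * np.log(1 - a))

a, y, eps = 0.7, 1.0, 1e-6
analytic = -y / a + (1 - y) / (1 - a)          # the dA[L] formula
numeric = (bce(a + eps, y) - bce(a - eps, y)) / (2 * eps)
print(abs(analytic - numeric) < 1e-8)          # True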
@@ -417,30 +419,34 @@ class MultiLayerPerceptron(object):
             self._W[l] = self._W[l] - self._alpha * w_factor
             self._b[l] = self._b[l] - self._alpha * b_factor
 
-        return J
-
     def minimize_cost(self, min_cost, max_iter=100000, alpha=None, plot=False):
         """Propagate forward then backward in loop while minimizing the cost function.
 
         :param min_cost: cost function value to reach in order to stop algo.
         :param max_iter: maximum number of iterations to reach min cost befor stoping algo. (Default 100000).
         :param alpha: learning rate, if None use the instance alpha value. Default None.
+        :param plot: if True will plot the graph cost function depending on iteration
 
         """
         nb_iter = 0
         if alpha is None:
             alpha = self._alpha
+        # forward propagation
         self.propagate()
         if plot:
             y=[]
             x=[]
         for i in range(max_iter):
-            J = self.back_propagation(True)
+            nb_iter = i + 1
+            # compute cost function
+            J = self.compute_cost_function()
             if plot:
                 y.append(J)
                 x.append(nb_iter)
+            # back propagation
+            self.back_propagation()
+            # forward propagation
             self.propagate()
-            nb_iter = i + 1
             if J <= min_cost:
                 break
         if mp and plot:
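Net effect on `minimize_cost`: each iteration now prices the activations left by the previous forward pass before touching the weights, rather than folding the cost into the backward pass. Stripped of the plotting bookkeeping, the reworked loop body reads as (a schematic of the method after this commit, not its full text):

self.propagate()                      # initial forward pass
for i in range(max_iter):
    nb_iter = i + 1
    J = self.compute_cost_function()  # cost of the current activations
    self.back_propagation()           # gradients + parameter updates
    self.propagate()                  # forward pass with the new weights
    if J <= min_cost:                 # stop once the target cost is reached
        break

Separating the two steps also means the cost can be evaluated on its own, without triggering a parameter update.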