I'm trying to build a simple neural network class from scratch using numpy and test it on the XOR problem, but the backpropagation function (backprop) doesn't seem to work correctly.
In the class, I construct an instance by passing in the size of each layer and the activation function to use at each layer. I assume the final activation function is softmax, so that I can compute the derivative of the cross-entropy loss with respect to the last layer's Z. My class also has no separate set of bias matrices; the biases are folded into the weight matrices as an extra column at the end.
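Just to illustrate the bias-in-the-weight-matrix idea (with throwaway names W, b and x, not the variables in my class below):

import numpy as np

W = np.random.rand(2, 2)              # ordinary weight matrix
b = np.random.rand(2, 1)              # ordinary bias vector
x = np.array([[0.], [1.]])            # one input column vector

Wb = np.hstack([W, b])                # bias folded in as an extra last column
x1 = np.vstack([x, np.ones((1, 1))])  # input extended with a constant 1

print(np.allclose(W.dot(x) + b, Wb.dot(x1)))  # True -- same pre-activation either way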
I know my backprop function isn't working correctly because the network never converges to anything close to the correct output. I also wrote a numerical gradient function to compare against it, and the two give drastically different numbers.
From my understanding, the delta value of each layer (with L being the last layer and i standing for any other layer) should be:

delta^L = a^L - y        (derivative of the cross-entropy loss w.r.t. z^L, for a softmax output)
delta^i = ((W^(i+1))^T · delta^(i+1)) * sigma'(z^i)

and the corresponding gradient/weight update for those layers should be:

dC/dW^i = delta^i · (a^(i-1))^T

where * is the Hadamard product, a denotes the activation of a layer, and z denotes the non-activated output of a layer.
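To double-check that I'm reading those formulas correctly, here is a minimal numpy sketch of the recursion for a single hidden layer (names like delta_next and W_next are placeholders for this illustration only, not the variables in my class below):

import numpy as np

z_i = np.random.rand(2, 1)         # pre-activation of layer i
a_prev = np.random.rand(2, 1)      # activation of layer i-1
W_next = np.random.rand(2, 2)      # weights of layer i+1 (without the bias column)
delta_next = np.random.rand(2, 1)  # delta already computed for layer i+1

def sigmoid_prime(z):
    s = 1 / (1 + np.exp(-z))
    return s * (1 - s)

# delta^i = ((W^(i+1))^T . delta^(i+1)) * sigma'(z^i)
delta_i = W_next.T.dot(delta_next) * sigmoid_prime(z_i)

# dC/dW^i = delta^i . (a^(i-1))^T ; the bias part of the gradient is just delta^i
grad_W_i = delta_i.dot(a_prev.T)
grad_b_i = delta_i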
The sample data I'm using for testing is at the bottom of the file.
This is my first attempt at implementing the backpropagation algorithm from scratch, so I'm a bit lost on where to go from here.
import numpy as np
def sigmoid(n, deriv=False):
    if deriv:
        return np.multiply(n, np.subtract(1, n))
    return 1 / (1 + np.exp(-n))
def softmax(X, deriv=False):
    if not deriv:
        exps = np.exp(X - np.max(X))
        return exps / np.sum(exps)
    else:
        raise Error('Unimplemented')
def cross_entropy(y, p, deriv=False):
    """
    when deriv = True, returns deriv of cost wrt z
    """
    if deriv:
        ret = p - y
        return ret
    else:
        p = np.clip(p, 1e-12, 1. - 1e-12)
        N = p.shape[0]
        return -np.sum(y*np.log(p))/(N)
class NN:
    def __init__(self, layers, activations):
        """random initialization of weights/biases
        NOTE - biases are built into the standard weight matrices by adding an extra column
        and multiplying it by one in every layer"""
        self.activate_fns = activations
        self.weights = [np.random.rand(layers[1], layers[0]+1)]
        for i in range(1, len(layers)):
            if i != len(layers)-1:
                self.weights.append(np.random.rand(layers[i+1], layers[i]+1))
                for j in range(layers[i+1]):
                    for k in range(layers[i]+1):
                        if np.random.rand(1,1)[0,0] > .5:
                            self.weights[-1][j,k] = -self.weights[-1][j,k]
    def ff(self, X, get_activations=False):
        """Feedforward"""
        activations, zs = [], []
        for activate, w in zip(self.activate_fns, self.weights):
            X = np.vstack([X, np.ones((1, 1))]) # adding bias
            z = w.dot(X)
            X = activate(z)
            if get_activations:
                zs.append(z)
                activations.append(X)
        return (activations, zs) if get_activations else X
    def grad_descent(self, data, epochs, learning_rate):
        """gradient descent
        data - list of 2 item tuples, the first item being an input, and the second being its label"""
        grad_w = [np.zeros_like(w) for w in self.weights]
        for _ in range(epochs):
            for x, y in data:
                grad_w = [n+o for n, o in zip(self.backprop(x, y), grad_w)]
            self.weights = [w-(learning_rate/len(data))*gw for w, gw in zip(self.weights, grad_w)]
    def backprop(self, X, y):
        """performs backprop for one layer of a NN with softmax/cross_entropy output layer"""
        (activations, zs) = self.ff(X, True)
        activations.insert(0, X)

        deltas = [0 for _ in range(len(self.weights))]
        grad_w = [0 for _ in range(len(self.weights))]

        deltas[-1] = cross_entropy(y, activations[-1], True) # assumes output activation is softmax
        grad_w[-1] = np.dot(deltas[-1], np.vstack([activations[-2], np.ones((1, 1))]).transpose())

        for i in range(len(self.weights)-2, -1, -1):
            deltas[i] = np.dot(self.weights[i+1][:, :-1].transpose(), deltas[i+1]) * self.activate_fns[i](activations[i], True)
            grad_w[i] = np.hstack((np.dot(deltas[i], activations[max(0, i-1)].transpose()), deltas[i]))

        # check gradient
        num_gw = self.gradient_check(X, y, i)
        print('numerical:', num_gw, '\nanalytic:', grad_w)

        return grad_w
    def gradient_check(self, x, y, i, epsilon=1e-4):
        """Numerically calculate the gradient in order to check analytical correctness"""
        grad_w = [np.zeros_like(w) for w in self.weights]
        for w, gw in zip(self.weights, grad_w):
            for j in range(w.shape[0]):
                for k in range(w.shape[1]):
                    w[j,k] += epsilon
                    out1 = cross_entropy(self.ff(x), y)
                    w[j,k] -= 2*epsilon
                    out2 = cross_entropy(self.ff(x), y)
                    gw[j,k] = np.float64(out1 - out2) / (2*epsilon)
                    w[j,k] += epsilon # return weight to original value
        return grad_w
##### TESTING #####
X = [np.array([[0],[0]]), np.array([[0],[1]]), np.array([[1],[0]]), np.array([[1],[1]])]
y = [np.array([[1], [0]]), np.array([[0], [1]]), np.array([[0], [1]]), np.array([[1], [0]])]
data = []
for x, t in zip(X, y):
    data.append((x, t))
def nn_test():
    c = NN([2, 2, 2], [sigmoid, sigmoid, softmax])
    c.grad_descent(data, 100, .01)
    for x in X:
        print(c.ff(x))
nn_test()
For node y_n, the gradient ends up being y_n - t_n. So for a softmax with output [0.2, 0.2, 0.3, 0.3] and desired output [0, 1, 0, 0], the gradient at each of the softmax nodes is [0.2, -0.8, 0.3, 0.3].
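A quick numpy check of that arithmetic (p and t are just throwaway names here):

import numpy as np

p = np.array([0.2, 0.2, 0.3, 0.3])  # softmax output
t = np.array([0., 1., 0., 0.])      # desired one-hot output
print(p - t)                        # [ 0.2 -0.8  0.3  0.3]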
It looks as though you are subtracting 1 from the whole array. The variable names aren't very clear, so if you could rename them from L to what L stands for, such as output_layer, I'd be able to help more.
Also, just to clear things up for the other layers: when you write a^(L-1), do you mean "a raised to the power of (L-1)" or "a XOR (L-1)"? Because in Python ^ means XOR.
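For example:

print(2 ** 3)  # 8 -> exponentiation
print(2 ^ 3)   # 1 -> bitwise XOR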