XOR Gate with Backpropagation


I am trying to understand how backpropagation works, so before writing a generic algorithm I wrote a simple script to try to understand it.

What the script tries to do is train an XOR gate. My neural network is very simple: 2 inputs, 2 hidden neurons, and 1 output. (Note that the bias is omitted for simplicity.)

[Image: diagram of the network, 2 inputs, 2 hidden neurons, 1 output]

(For more information, see the images at the end.)

The problem is that after training, the perceptron does not work, and I do not know where the problem is. It could be in my equations or in my implementation.

Code:

    def xor(self):
        print('xor')
        X = np.array([[1,1],[1,0],[0,1],[0,0]]) #X.shape = (4,2)
        y = np.array([0,1,1,0])
        w0 = np.array([[.9,.1],[.3,.5]]) #random weights layer0
        w1 = np.array([.8,.7]) #random weights layer1

        #forward pass
        youtput=[]
        for i in range(X.shape[0]):#X.shape = (4,2)
            #print('x0', X[i][0])
            #print('x1', X[i][1])
            h0 = self.sig(w0[0,0]*X[i][0] + w0[1,0]*X[i][1])
            h1 = self.sig(w0[0,1]*X[i][0] + w0[1,1]* X[i][1])
            y0 = self.sig(w1[0]* h0 + w1[1] * h1) # scalar output for this sample
            youtput.append(y0)
            print('y0',y0)

            #backpropagation
            dey0 = -(y[i]-y0) # y[i] -> desired output | y0 -> output
            deW0_00 = dey0 * y0 * (1 - y0) * w1[0] * h0 * (1 - h0) * X[i][0]
            deW0_01 = dey0 * y0 * (1 - y0) * w1[1] * h1 * (1 - h1) * X[i][0]
            deW0_10 = dey0 * y0 * (1 - y0) * w1[0] * h0 * (1 - h0) * X[i][1]
            deW0_11 = dey0 * y0 * (1 - y0) * w1[1] * h1 * (1 - h1) * X[i][1]
            deW1_00 = dey0 * h0
            deW1_10 = dey0 * h1

            w0[0,0] = self.gradient(w0[0,0], deW0_00)
            w0[0,1] = self.gradient(w0[0,1], deW0_01)
            w0[1,0] = self.gradient(w0[1,0], deW0_10)
            w0[1,1] = self.gradient(w0[1,1], deW0_11)
            w1[0] = self.gradient(w1[0], deW1_00)
            w1[1] = self.gradient(w1[1], deW1_10)

            #print('print W0, ', w0)
            #print('print W1, ', w1)
        print('error -> ', self.error(y,youtput ))  
        #forward pass
        youtput2= []
        for i in range(X.shape[0]):#X.shape = (4,2)
            print('x0 =', X[i][0], ', x1 =', X[i][1])
            h0 = self.sig(w0[0,0]*X[i][0] + w0[1,0]*X[i][1])
            h1 = self.sig(w0[0,1]*X[i][0] + w0[1,1]* X[i][1])
            y0 = self.sig(w1[0]* h0 + w1[1] * h1)
            youtput2.append(y0)
            print('y0----->',y0)
        print('error -> ', self.error(y,youtput2 ))

    def gradient(self, w, w_derivative):
        alpha = .001
        for i in range(1000000):
            w = w - alpha * w_derivative
        return w

    def error(self, y, yhat):
        e = 0
        for i in range (y.shape[0]):
            e = e + .5 * (y[i]- yhat[i])**2
        return e 

    def sig(self,x):
         return 1 / (1 + math.exp(-x)) 

Results

PS C:\gitProjects\perceptron> python .\perceptron.py
xor
y0 0.7439839341840395
y0 0.49999936933995615
y0 0.4999996364775347
y0 7.228146514841657e-229
error ->  0.5267565442535
x0 = 1 , x1 = 1
y0-----> 0.49999999999999856
x0 = 1 , x1 = 0
y0-----> 0.4999993695274945
x0 = 0 , x1 = 1
y0-----> 0.49999963653435153
x0 = 0 , x1 = 0
y0-----> 7.228146514841657e-229
error ->  0.3750004969693411

Equations (the derivatives as implemented in the code above):

    E = 1/2 * (y - y0)^2
    dE/dy0 = -(y - y0)
    dE/dw0_jk = dE/dy0 * y0*(1 - y0) * w1_k * h_k*(1 - h_k) * x_j   (hidden-layer weights)
    dE/dw1_k  = dE/dy0 * h_k                                        (output-layer weights)
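A useful way to decide whether a bug lives in the derivative formulas or in the implementation is a finite-difference gradient check: perturb one weight, recompute the loss, and compare the numerical slope against the analytic derivative. A minimal sketch for the first output weight, reusing the initial values above (the helper `loss_for_w1_0` is just for illustration):

    import numpy as np

    def sig(x):
        return 1 / (1 + np.exp(-x))

    def loss_for_w1_0(v, x=(1.0, 0.0), target=1.0):
        # same network as above, with w1[0] replaced by v
        w0 = np.array([[.9, .1], [.3, .5]])
        w1 = np.array([.8, .7])
        h0 = sig(w0[0, 0]*x[0] + w0[1, 0]*x[1])
        h1 = sig(w0[0, 1]*x[0] + w0[1, 1]*x[1])
        y0 = sig(v*h0 + w1[1]*h1)
        return .5 * (target - y0)**2

    eps = 1e-6
    numeric = (loss_for_w1_0(.8 + eps) - loss_for_w1_0(.8 - eps)) / (2 * eps)
    print('numeric dE/dw1_0 ->', numeric)  # compare with the analytic deW1_00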

1 Answer

I just changed the way you "loop", and now it seems to work fine (modified code below).

I may have missed something, but your backpropagation looks fine to me.
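To make the fix concrete: the original `gradient` helper applied the same derivative a million times per call, which collapses to a single enormous step of size 1000000 * alpha in one direction. The corrected helper takes one small step per call and lets an outer epoch loop (see the full code below) recompute the derivative after every update. A side-by-side sketch (the `_old`/`_new` names are just for illustration):

    # Original helper: repeats the *same* update 1,000,000 times,
    # which is equivalent to one step of size 1000000 * alpha.
    def gradient_old(w, w_derivative, alpha=.001):
        for i in range(1000000):
            w = w - alpha * w_derivative
        return w  # == w - 1000000 * alpha * w_derivative

    # Corrected helper: one small step per call; the derivative is
    # recomputed by the training loop before the next call.
    def gradient_new(w, w_derivative, alpha=.2):
        return w - alpha * w_derivative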

import numpy as np
import math

class perceptronmonocouche(object):
    def xor(self):
        print('xor')
        X = np.array([[1,1],[1,0],[0,1],[0,0]]) #X.shape = (4,2)
        y = np.array([0,1,1,0])
        w0 = np.array([[.9,.1],[.3,.5]]) #random weights layer0
        w1 = np.array([.8,.7]) #random weights layer1
        max_epochs = 10000
        epochs = 0
        agreed_convergence_error = 0.001
        error = 1
        decision_threshold = 0.5

        while epochs <= max_epochs and error > agreed_convergence_error: # stop at max_epochs or once the error is small enough
            #forward pass
            epochs += 1
            youtput=[]
            for i in range(X.shape[0]):#X.shape = (4,2)
                #print('x0', X[i][0])
                #print('x1', X[i][1])
                h0 = self.sig(w0[0,0]*X[i][0] + w0[1,0]*X[i][1])
                h1 = self.sig(w0[0,1]*X[i][0] + w0[1,1]* X[i][1])
                y0 = self.sig(w1[0]* h0 + w1[1] * h1) # scalar output for this sample
                youtput.append(y0)
                if epochs%1000 ==0:
                    print('y0',y0)
                    if y0 > decision_threshold:
                        prediction = 1
                    else:
                        prediction = 0
                    print('real value', y[i])
                    print('predicted value', prediction)

                #backpropagation
                dey0 = -(y[i]-y0) # y[i] -> desired output | y0 -> output            
                dew0_00 = dey0 * y0 * (1 - y0) * w1[0] * h0 * (1 - h0) * X[i][0]
                dew0_01 = dey0 * y0 * (1 - y0) * w1[1] * h1 * (1 - h1) * X[i][0]
                dew0_10 = dey0 * y0 * (1 - y0) * w1[0] * h0 * (1 - h0) * X[i][1]
                dew0_11 = dey0 * y0 * (1 - y0) * w1[1] * h1 * (1 - h1) * X[i][1]
                dew1_0 = dey0 * h0
                dew1_1 = dey0 * h1

                w0[0,0] = self.gradient(w0[0,0], dew0_00)
                w0[0,1] = self.gradient(w0[0,1], dew0_01)
                w0[1,0] = self.gradient(w0[1,0], dew0_10)
                w0[1,1] = self.gradient(w0[1,1], dew0_11)
                w1[0] = self.gradient(w1[0], dew1_0)
                w1[1] = self.gradient(w1[1], dew1_1)

                #print('print W0, ', w0)
                #print('print W1, ', w1)
            error = self.error(y, youtput) # refresh the stopping-criterion error
            if epochs%1000 ==0:
                print('error -> ', error)

    def gradient(self, w, w_derivative):
        alpha = .2
        w = w - alpha * w_derivative
        return w

    def error(self, y, yhat):
        e = 0
        for i in range (y.shape[0]):
            e = e + .5 * (y[i]- yhat[i])**2
        return e 

    def sig(self,x):
         return 1 / (1 + math.exp(-x))

p = perceptronmonocouche()
p.xor()

Results

y0 0.05892656406522486
real value 0
predicted value 0
y0 0.9593864604895951
real value 1
predicted value 1
y0 0.9593585562506973
real value 1
predicted value 1
y0 0.03119936553811551
real value 0
predicted value 0
error ->  0.003873463452052477

A few remarks:

  • It seems that the diagram you showed corresponds neither exactly to your code nor to a perceptron capable of learning an XOR gate. XOR inputs are not linearly separable, since two straight lines are needed to perform the required separation. Your code (and the diagram of a perceptron capable of this separation) should look like this:

[Image: diagram of the 2-2-1 network with a hidden layer, matching the code]

  • The bias terms have been omitted; a sketch of the forward pass with biases added follows below.
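A bias can be added as a per-neuron constant that is trained exactly like a weight whose input is always 1. A minimal sketch of the forward pass with biases (the initial values of `b0` and `b1` below are illustrative assumptions, not taken from the original code):

    import numpy as np

    def sig(x):
        return 1 / (1 + np.exp(-x))

    w0 = np.array([[.9, .1], [.3, .5]])  # hidden-layer weights, as above
    w1 = np.array([.8, .7])              # output-layer weights, as above
    b0 = np.array([.1, .1])              # hypothetical hidden-layer biases
    b1 = .1                              # hypothetical output bias

    def forward(x):
        # same forward pass as the code above, plus one bias per neuron
        h0 = sig(w0[0, 0]*x[0] + w0[1, 0]*x[1] + b0[0])
        h1 = sig(w0[0, 1]*x[0] + w0[1, 1]*x[1] + b0[1])
        return sig(w1[0]*h0 + w1[1]*h1 + b1)

    print(forward((1.0, 0.0)))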