训练具有反推功能的前馈神经网络。

问题描述 投票:0回答:1

我实现了一个神经网络，它由（4个输入神经元 - 5个隐藏神经元 - 3个输出神经元）组成。我在Iris数据集上应用了这个学习算法，并使用z-score方法对数据进行了标准化。当我测试模型时，准确率从未超过67%。我怀疑我对反向传播算法的实现可能存在错误。这是我的第一个神经网络，所以请原谅任何愚蠢的错误 :)。

我试过改变学习率、改变迭代次数，以及调试反向传播算法。

编辑：我没有使用任何正则化项。

def adjust_labels(label_matrix):
    """Convert integer class labels (0, 1, 2) into one-hot row vectors.

    Bug fix: class 0 was previously encoded as [0, 0, 0] (every row of the
    np.ones buffer was overwritten, and the class-0 branch wrote all zeros),
    so the network had no distinct target for the first class -- the likely
    cause of the ~67% accuracy ceiling (2 of 3 classes learnable).
    Class 0 now correctly maps to [1, 0, 0].

    The output is also sized from len(label_matrix) instead of a
    hard-coded 88, so the function works for any batch size.

    :param label_matrix: 1-D sequence of integer labels in {0, 1, 2}
    :return: (len(label_matrix), 3) float array of one-hot rows
    """
    final_label_matrix = np.zeros((len(label_matrix), 3))
    for row, label in enumerate(label_matrix):
        final_label_matrix[row, int(label)] = 1.0
    return final_label_matrix


# neural network setup: inputs, labels, and randomly initialized weights.
# assumes x_matrix arrives as (4, n_samples) and is transposed to samples-as-rows -- TODO confirm
x_matrix = np.transpose(x_matrix)  # define the x matrix
# NOTE(review): seed(1) looks like random.seed from the stdlib; it does NOT
# seed np.random, so the np.random.randn calls below are not reproducible.
# Use np.random.seed(1) if reproducible weight init was the intent -- confirm.
seed(1)
weight_matrix_1 = np.random.randn(4, 5)  # input -> hidden weights (4 inputs, 5 hidden units)
weight_matrix_2 = np.random.randn(5, 3)  # hidden -> output weights (5 hidden units, 3 classes)
final_labels = adjust_labels(labels)  # one-hot encoded training labels

x_testing_matrix = np.transpose(x_testing_matrix)  # test inputs, same layout as x_matrix
x_test_labels = adjust_labels(x_test_labels)  # one-hot encoded test labels


def sigmoid(x):
    """Element-wise logistic function: 1 / (1 + e^(-x))."""
    exp_neg = np.exp(-x)
    return 1.0 / (1.0 + exp_neg)

def sigmoid_prime(x):
    """Derivative of the sigmoid, computed as s(x) * (1 - s(x)).

    Algebraically identical to exp(-x) / (1 + exp(-x))^2, but the original
    form squares (1 + exp(-x)), which overflows to inf for moderately
    negative x and silently turns gradients into 0/NaN. The factored form
    stays finite everywhere.
    """
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)


def forward_propagation(x, w1, w2, label_matrix, learning_rate, number_of_iterations):
    """Train the two-layer network with full-batch gradient descent.

    Despite the name, this performs complete training: a forward pass
    followed by back-propagation and a weight update on every iteration.
    (Name kept unchanged for caller compatibility.)

    Cleanup: removed the commented-out debug prints and the unused
    `error = loss_function(...)` local, which recomputed the loss on every
    iteration without using it.

    :param x: (n_samples, 4) input matrix, samples as rows
    :param w1: (4, 5) input->hidden weight matrix
    :param w2: (5, 3) hidden->output weight matrix
    :param label_matrix: (n_samples, 3) one-hot targets
    :param learning_rate: gradient-descent step size
    :param number_of_iterations: number of full-batch updates
    :return: tuple (w1, w2) of trained weight matrices
    """
    for _ in range(number_of_iterations):
        # forward pass
        z2 = np.dot(x, w1)    # hidden pre-activation
        a2 = sigmoid(z2)      # hidden activation
        z3 = np.dot(a2, w2)   # output pre-activation
        a3 = sigmoid(z3)      # network output
        # back-propagation of the squared-error loss J = 0.5 * sum((y - a3)^2)
        delta_3 = -(label_matrix - a3) * sigmoid_prime(z3)
        dJW2 = a2.T @ delta_3
        delta_2 = (delta_3 @ w2.T) * sigmoid_prime(z2)
        dJW1 = x.T @ delta_2
        # gradient-descent step
        w2 = w2 - learning_rate * dJW2
        w1 = w1 - learning_rate * dJW1
    return w1, w2


def finalize_prediction(output):
    """Return the predicted class index (argmax per row) as a column vector.

    Bug fix: the result buffer was hard-coded to shape (88, 1), so any
    other batch size either crashed or returned padding zeros. The result
    is now sized from the input, and the row-wise argmax is vectorized.

    :param output: (n_samples, n_classes) network output matrix
    :return: (n_samples, 1) float array of winning class indices
    """
    return np.argmax(output, axis=1).reshape(-1, 1).astype(float)


def loss_function(prediction_matrix, label_matrix):
    """Half of the summed squared error between predictions and targets."""
    squared_residuals = np.square(label_matrix - prediction_matrix)
    return 0.5 * np.sum(squared_residuals)


def check_working(x, w1, w2, label_matrix):
    """Count how many samples in x the network classifies correctly.

    Runs one forward pass per sample and compares the argmax of the network
    output with the argmax of the corresponding one-hot label row.

    Cleanup: removed the stray debug `print(len(x))` that polluted stdout
    and the commented-out weight dumps.

    :param x: (n_samples, 4) input matrix, samples as rows
    :param w1: (4, 5) input->hidden weight matrix
    :param w2: (5, 3) hidden->output weight matrix
    :param label_matrix: (n_samples, 3) one-hot targets
    :return: number of correctly classified samples (int)
    """
    success_counter = 0
    for sample, target in zip(x, label_matrix):
        a2 = sigmoid(np.dot(sample, w1))
        a3 = sigmoid(np.dot(a2, w2))  # network output for this sample
        if np.argmax(a3) == np.argmax(target):
            success_counter += 1
    return success_counter

python machine-learning neural-network backpropagation
1个回答
© www.soinside.com 2019 - 2024. All rights reserved.