Neural network implemented from scratch in C++ cannot get above 50% on MNIST


So I have implemented a fully connected neural network with one hidden layer in C++, using Eigen for the matrix multiplications. It uses mini-batch gradient descent.

However, my model cannot get above 50% accuracy on MNIST. I have tried learning rates from 0.0001 to 10. The model does overfit on training sets of fewer than 100 examples (reaching ~90% accuracy, which is still quite bad), although extremely slowly.

What could be causing such low accuracy and such slow learning? My main concern is that the backpropagation is incorrect. Also, I would prefer not to add any other optimization techniques (learning-rate schedules, regularization, etc.).

Feed-forward and backpropagation code:

z1 = (w1 * mbX).colwise() + b1;
a1 = sigmoid(z1);

z2 = (w2 * a1).colwise() + b2;
a2 = sigmoid(z2);

MatrixXd err = ((double) epsilon)/((double) minibatch_size) * ((a2 - mbY).array() * sigmoid_derivative(z2).array()).matrix();

b2 = b2 - err * ones;
w2 = w2 - (err * a1.transpose());

err = ((w2.transpose() * err).array() * sigmoid_derivative(z1).array()).matrix();

b1 = b1 - err * ones;
w1 = w1 - (err * mbX.transpose());
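
For reference, and assuming the quadratic cost that the program computes later, these updates are meant to be the standard mini-batch gradient steps, where $m$ is the mini-batch size, $\epsilon$ the learning rate, $\odot$ the element-wise product, $\mathbf{1}$ a column of ones, and $X$ the mini-batch input:

$$\delta_2 = \frac{\epsilon}{m}\,(a_2 - y)\odot\sigma'(z_2), \qquad b_2 \leftarrow b_2 - \delta_2\mathbf{1}, \qquad W_2 \leftarrow W_2 - \delta_2 a_1^{\top}$$

$$\delta_1 = (W_2^{\top}\delta_2)\odot\sigma'(z_1), \qquad b_1 \leftarrow b_1 - \delta_1\mathbf{1}, \qquad W_1 \leftarrow W_1 - \delta_1 X^{\top}$$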

Full program code:

#include <iostream>
#include <fstream>
#include <math.h>
#include <cstdlib>
#include <Eigen/Dense>
#include <vector>
#include <string>
#include <algorithm> // for std::random_shuffle

using namespace Eigen;

#define N 30
#define epsilon 0.7
#define epoch 1000

//sizes
const int minibatch_size = 10;
const int training_size = 10000;
const int val_size = 10;

unsigned int num, magic, rows, cols;

//images

unsigned int image[training_size][28][28];
unsigned int val_image[val_size][28][28];

//labels

unsigned int label[training_size];
unsigned int val_label[val_size];

//inputs

MatrixXd X(784, training_size);
MatrixXd Y = MatrixXd::Zero(10, training_size);

//minibatch

MatrixXd mbX(784, minibatch_size);
MatrixXd mbY = MatrixXd::Zero(10, minibatch_size);

//validation

MatrixXd Xv(784, val_size);
MatrixXd Yv = MatrixXd::Zero(10, val_size);

//Image processing courtesy of https://stackoverflow.com/users/11146076/%e5%bc%a0%e4%ba%91%e9%93%ad

unsigned int in(std::ifstream& icin, unsigned int size) {
    unsigned int ans = 0;
    for (int i = 0; i < size; i++) {
        unsigned char x;
        icin.read((char*)&x, 1);
        unsigned int temp = x;
        ans <<= 8;
        ans += temp;
    }
    return ans;
}
void input(std::string ipath, std::string lpath, std::string ipath2, std::string lpath2) {
    std::ifstream icin;

    //training data
    icin.open(ipath, std::ios::binary);
    magic = in(icin, 4), num = in(icin, 4), rows = in(icin, 4), cols = in(icin, 4);
    for (int i = 0; i < training_size; i++) {
        int val = 0;
        for (int x = 0; x < rows; x++) {
            for (int y = 0; y < cols; y++) {
                image[i][x][y] = in(icin, 1);
                X(val, i) = image[i][x][y]/255;
                val++;
            }
        }
    }
    icin.close();

    //training labels
    icin.open(lpath, std::ios::binary);
    magic = in(icin, 4), num = in(icin, 4);
    for (int i = 0; i < training_size; i++) {
        label[i] = in(icin, 1);
        Y(label[i], i) = 1;
    }
    icin.close();

    //validation data
    icin.open(ipath2, std::ios::binary);
    magic = in(icin, 4), num = in(icin, 4), rows = in(icin, 4), cols = in(icin, 4);
    for (int i = 0; i < val_size; i++) {
        int val = 0;
        for (int x = 0; x < rows; x++) {
            for (int y = 0; y < cols; y++) {
                val_image[i][x][y] = in(icin, 1);
                Xv(val, i) = val_image[i][x][y]/255;
                val++;
            }
        }
    }
    icin.close();

    //validation labels
    icin.open(lpath2, std::ios::binary);
    magic = in(icin, 4), num = in(icin, 4);
    for (int i = 0; i < val_size; i++) {
        val_label[i] = in(icin, 1);
        Yv(val_label[i], i) = 1;
    }

    icin.close();
}

//Neural Network calculations

MatrixXd sigmoid(MatrixXd m) {
    m *= -1;
    return (1/(1 + m.array().exp())).matrix();
}

MatrixXd sigmoid_derivative(MatrixXd m) {
    return (sigmoid(m).array() * (1 - sigmoid(m).array())).matrix();
}


//Initialize weights and biases

//hidden layer
VectorXd b1 = MatrixXd::Zero(N, 1);
MatrixXd w1 = MatrixXd::Random(N, 784);


//output
VectorXd b2 = MatrixXd::Zero(10, 1);
MatrixXd w2 = MatrixXd::Random(10, N);

//Initialize intermediate values
MatrixXd z1, z2, a1, a2, z1v, z2v, a1v, a2v;
MatrixXd ones = MatrixXd::Constant(minibatch_size, 1, 1);


int main() {
    input("C:\\Users\\Aaron\\Documents\\Test\\train-images-idx3-ubyte\\train-images.idx3-ubyte", "C:\\Users\\Aaron\\Documents\\Test\\train-labels-idx1-ubyte\\train-labels.idx1-ubyte", "C:\\Users\\Aaron\\Documents\\Test\\t10k-images-idx3-ubyte\\t10k-images.idx3-ubyte", "C:\\Users\\Aaron\\Documents\\Test\\t10k-labels-idx1-ubyte\\t10k-labels.idx1-ubyte");

    std::cout << "Finished Image Processing" << std::endl;

    //std::cout << w1 << std::endl;

    std::vector<double> val_ac;
    std::vector<double> c;

    std::vector<int> order;

    for (int i = 0; i < training_size; i++) {
        order.push_back(i);
    }

    for (int i = 0; i < epoch; i++) {
        //feed forward

        std::random_shuffle(order.begin(), order.end());

        for (int j = 0; j < training_size/minibatch_size; j++) {
            for (int k = 0; k < minibatch_size; k++) {
                int index = order[j * minibatch_size + k];
                mbX.col(k) = X.col(index);
                mbY.col(k) = Y.col(index);
            }

            z1 = (w1 * mbX).colwise() + b1;
            a1 = sigmoid(z1);

            z2 = (w2 * a1).colwise() + b2;
            a2 = sigmoid(z2);

            MatrixXd err = ((double) epsilon)/((double) minibatch_size) * ((a2 - mbY).array() * sigmoid_derivative(z2).array()).matrix();

            //std::cout << err << std::endl;
            b2 = b2 - err * ones;
            w2 = w2 - (err * a1.transpose());

            err = ((w2.transpose() * err).array() * sigmoid_derivative(z1).array()).matrix();

            //std::cout << err << std::endl;

            b1 = b1 - err * ones;
            w1 = w1 - (err * mbX.transpose());

        }

        //validation

        z1 = (w1 * X).colwise() + b1;
        a1 = sigmoid(z1);

        z2 = (w2 * a1).colwise() + b2;
        a2 = sigmoid(z2);

        double cost = 1/((double) training_size) * ((a2 - Y).array() * (a2 - Y).array()).matrix().sum();

        c.push_back(cost);

        int correct = 0;

        for (int i = 0; i < training_size; i++) {
            double maxP = -1;
            int na;
            for (int j = 0; j < 10; j++) {
                if (a2(j, i) > maxP) {
                    maxP = a2(j, i);
                    na = j;
                }
            }
            if (na == label[i]) correct++;
        }

        val_ac.push_back(((double) correct) / ((double) training_size));

        std::cout << "Finished Epoch " << i + 1 << std::endl;
        std::cout << "Cost: " << cost << std::endl;
        std::cout << "Accuracy: " << ((double) correct) / ((double) training_size) << std::endl;
    }


    //plot accuracy

    FILE * gp = _popen("gnuplot", "w");
    fprintf(gp, "set terminal wxt size 600,400 \n");
    fprintf(gp, "set grid \n");
    fprintf(gp, "set title '%s' \n", "NN");
    fprintf(gp, "plot '-' w line, '-' w lines \n");

    for (int i = 0; i < epoch; i++) {
        fprintf(gp, "%f %f \n", i + 1.0, c[i]);
    }
    fprintf(gp, "e\n");

    //validation accuracy
    for (int i = 0; i < epoch; i++) {
        fprintf(gp, "%f %f \n", i + 1.0, val_ac[i]);
    }
    fprintf(gp, "e\n");

    fflush(gp);


    system("pause");
    _pclose(gp);

    return 0;
}

UPD

Here is a plot of the accuracy (green) and loss (purple) on the training dataset:

https://i.stack.imgur.com/Ya2yR.png

Here are the loss curves for the training and the validation data:

https://imgur.com/a/4gmFCrk

The validation loss increases after a certain point, which suggests overfitting. However, the accuracy is still poor even on the training data.

c++ machine-learning image-processing neural-network eigen
1 Answer
unsigned int val_image[val_size][28][28];    

Xv(val, i) = val_image[i][x][y]/255;

Can you retry with Xv(val, i) = val_image[i][x][y] / 255.0;?

With the code as written, Xv is usually 0, and is 1 only when the image value is exactly 255. With floating-point division you will get values between 0.0 and 1.0.

You will want to check the other places in the code where you may be dividing integers.
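
For example, the training images appear to be loaded with the same pattern; a sketch of that line with the same floating-point fix applied:

X(val, i) = image[i][x][y] / 255.0; // floating-point division scales the pixel into [0, 1]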

Note: in C++, 240/255 is 0.
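
To make that concrete, a minimal standalone example of the two divisions:

#include <iostream>

int main() {
    unsigned int pixel = 240;
    std::cout << pixel / 255 << "\n";    // integer division: prints 0
    std::cout << pixel / 255.0 << "\n";  // floating-point division: prints 0.941176
    return 0;
}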
