What is the next step after completing your first neural network?


I am a big fan of the YouTube channel 3Blue1Brown, and his series on neural networks really got me excited about the subject. I decided to create my own neural network from scratch in Python and work through the math in depth myself. So, with the help of the MNIST database of handwritten digits, I got started, and after 2 weeks I had succeeded. Since then I have been developing the code further so that the number of neurons and hidden layers can be tuned neatly inside the code. I have also experimented with different activation functions. The best accuracy I have achieved is about 95%, with 2 hidden layers of 16 neurons each and 5 minutes of training.

Now, my question is admittedly rather vague, but I am looking for my next challenge in this area — do you have any suggestions?

I have the framework built now, so I would love a new type of problem with a larger dataset or something along those lines. Or should I keep working on the existing problem to push the output accuracy even higher?

What do you think?

Best regards, Emil

(Here is the code, in case anyone is interested)

import pickle
import gzip
import numpy as np
import random
import time

class mnistClass:
    def __init__(self, inputAmount=784, layers=2, layerSize=16, outputSize=10, loops=1, sampleSize=100):
        with gzip.open('mnist.pkl.gz', 'rb') as f:
            train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
        self.A, self.y = train_set
        self.V, self.v2 = valid_set
        self.dataSize = len(self.A)
        self.inputAmount = inputAmount
        self.layers = layers
        self.layerSize = layerSize
        self.outputSize = outputSize
        self.loops = loops
        self.sampleSize = sampleSize
        self.iterations = int(self.dataSize/self.sampleSize)
        self.clock = time.time()
        self.Weights = []
        self.Biases = []
        self.initializeArrays()
        self.initializeTraining()
        print("Accuracy: " + str(self.getAccuracy()) + "%")

    def initializeArrays(self):
        for i in range(self.layers):
            if self.layers - i > 2:     #Adding middle layers
                self.Weights.append(np.random.rand(self.layerSize, self.layerSize)-0.5) 
            if self.layers - i > 1: 
                self.Biases.append(np.random.rand(self.layerSize)-0.5)
        if self.layers > 1:
            self.Weights.insert(0, np.random.rand(self.layerSize, self.inputAmount)-0.5)
            self.Weights.insert(len(self.Weights), np.random.rand(self.outputSize, self.layerSize)-0.5)
        else:
            self.Weights.insert(len(self.Weights), np.random.rand(self.outputSize, self.inputAmount)-0.5)
        self.Biases.insert(len(self.Biases), np.random.rand(self.outputSize)-0.5)

    def sigmoid(self, x, shiftType):
        if shiftType == 0:      #Standard logistic sigmoid, output in (0, 1)
            result = 1/(1+np.exp(-x))
        elif shiftType == 1:    #Rescaled to (-1, 1); algebraically equal to tanh(x/2)
            result = 2 * (1/(1+np.exp(-x))) - 1
        return result

    def sigmoidPrime(self, x, shiftType):
        if shiftType == 0:
            result = self.sigmoid(x, 0) - self.sigmoid(x, 0)**2     #s(1-s), derivative of the standard sigmoid
        elif shiftType == 1:
            result = 2*np.exp(-x)/(1+np.exp(-x))**2                 #Derivative of the rescaled sigmoid
        return result

    def Rdependance(self, Z, layer1, layer2):  #How R depends on a preceding R
        multi = layer1-layer2 > 1   #Recurse through the chain rule when the layers are not adjacent
        if not multi:
            if layer1 == self.layers-1:
                shiftType = 0
            else:
                shiftType = 1           
            R1_R2_differential = np.multiply(self.Weights[layer1], self.sigmoidPrime(Z[layer1]+self.Biases[layer1], shiftType)[:, np.newaxis])
            result = R1_R2_differential
        else:
            chainRule = []
            for i in reversed(range(layer2, layer1)):
                chainRule.append(self.Rdependance(Z, i+1, i))
            result = chainRule[0]
            for i in range(len(chainRule)-1):
                result = np.dot(result, chainRule[i+1])
        return result

    def RWdependance(self, R, Z, dataCaseNo, layer):   #How R depends on connecting Weights
        if layer == self.layers-1:
            shiftType = 0
        else:
            shiftType = 1
        R_W_differential = np.ones_like(self.Weights[layer])    #Matrix of ones matching the weight shape; the original W/W trick divides by zero if any weight is exactly 0
        mergeW_Z = np.multiply(R_W_differential, self.sigmoidPrime(Z[layer]+self.Biases[layer], shiftType)[:, np.newaxis])
        if layer == 0:
            R_W_differential = np.multiply(mergeW_Z.T, self.A[dataCaseNo][:, np.newaxis]).T
        else:
            R_W_differential = np.multiply(mergeW_Z.T, R[layer-1][:, np.newaxis]).T
        return R_W_differential

    def RBdependance(self, Z, layer):   #How R depends on internal Biases
        if layer == self.layers-1:
            shiftType = 0
        else:
            shiftType = 1
        R_B_differential = np.multiply(self.Rdependance(Z, self.layers-1, layer).T, self.sigmoidPrime(Z[layer]+self.Biases[layer], shiftType)[:, np.newaxis]).T
        return R_B_differential

    def integralWeightCost(self, R, Z, dataCaseNo, quadDifferential, layer): # Cost of system for weights
        if layer == self.layers-1:
            nodes = np.identity(self.outputSize)
        else:
            nodes = self.Rdependance(Z, self.layers-1, layer)
        cost_differential = np.multiply(nodes, quadDifferential[:, np.newaxis])
        cost_differential = np.sum(cost_differential, 0)
        result = np.multiply(self.RWdependance(R, Z, dataCaseNo, layer), cost_differential[:, np.newaxis])
        return result

    def integralBiasCost(self, Z, quadDifferential, layer): # Cost of system for biases
        if layer == self.layers-1:
            nodes = np.identity(self.outputSize)
        else:
            nodes = self.RBdependance(Z, layer)
        cost_differential = np.multiply(nodes, quadDifferential[:, np.newaxis])
        result = np.sum(cost_differential, 0)
        return result



    def initializeTraining(self):
        for loop in range(self.loops):
            for iteration in range(self.iterations):
                avg_cost = 0
                avg_deltaWeights = []
                avg_deltaBiases = []
                for i in range(len(self.Weights)):  #Zero-filled arrays matching the weight shapes
                    avg_deltaWeights.append(np.zeros_like(self.Weights[i]))
                for i in range(len(self.Biases)):
                    avg_deltaBiases.append(np.zeros_like(self.Biases[i]))
                for dataCaseNo in range(iteration*self.sampleSize, iteration*self.sampleSize + self.sampleSize):
                    if self.layers == 1:
                        shiftType = 0
                    else:
                        shiftType = 1
                    Y1 = np.zeros(self.outputSize)
                    Y1[self.y[dataCaseNo]] = 1
                    Z = []
                    Z.append(np.dot(self.Weights[0], self.A[dataCaseNo]))
                    R = []
                    R.append(self.sigmoid(Z[0]+self.Biases[0], shiftType))
                    for i in range(1, self.layers):
                        if i == self.layers-1:
                            shiftType = 0
                        else:
                            shiftType = 1
                        Z.append(np.dot(self.Weights[i], R[i-1]))
                        R.append(self.sigmoid(Z[i]+self.Biases[i], shiftType))

                    C = np.sum((R[-1] - Y1)**2)
                    avg_cost += C
                    quadDifferential = 2 * (R[-1]-Y1)

                    for i in range(self.layers):
                        avg_deltaWeights[i] += self.integralWeightCost(R, Z, dataCaseNo, quadDifferential, i)
                        avg_deltaBiases[i] += self.integralBiasCost(Z, quadDifferential, i)

                avg_cost = avg_cost/self.sampleSize
                for i in range(self.layers):
                    self.Weights[i] = self.Weights[i] - avg_deltaWeights[i]/self.sampleSize
                    self.Biases[i] = self.Biases[i] - avg_deltaBiases[i]/self.sampleSize
                print("Average cost: " + str(round(avg_cost, 4)))
            print("\n" + "*"*25 + " " + str(loop+1) +" " + "*"*25 + "\n")
        executionEndTime = round((time.time() - self.clock), 2)
        print("Completed " + str(self.loops) + " rounds of " + str(self.sampleSize*self.iterations) + " samples (sampleSize: " + str(self.sampleSize) + "), " + " in " + str(executionEndTime) + " seconds..")
        print("Layers: " + str(self.layers))
        print("Middle layer nodes: " + str(self.layerSize))
        print("Input amount: " + str(self.inputAmount))
        amountVariables = 0
        for i in range(self.layers):
            amountVariables += self.Weights[i].size
            amountVariables += self.Biases[i].size
        print("Variables: " + str(amountVariables))
        print("Output size: " + str(self.outputSize))
        time.sleep(2)

    def getAccuracy(self):
        runs = 10000
        correct = 0
        print("Testing validation set accuracy over " + str(runs) + " samples...\n")
        for i in range(runs):
            if self.layers == 1:
                shiftType = 0
            else:
                shiftType = 1
            ran = i
            Y1 = np.zeros(self.outputSize)
            Y1[self.v2[ran]] = 1
            Z = []
            Z.append(np.dot(self.Weights[0], self.V[ran]))
            R = []
            R.append(self.sigmoid(Z[0]+self.Biases[0], shiftType))
            for j in range(1, self.layers):     #Renamed from i to avoid shadowing the outer loop variable
                if j == self.layers-1:
                    shiftType = 0
                else:
                    shiftType = 1
                Z.append(np.dot(self.Weights[j], R[j-1]))
                R.append(self.sigmoid(Z[j]+self.Biases[j], shiftType))

            maxNum = np.argmax(R[-1])   #Index of the strongest output activation
            if int(self.v2[ran]) == int(maxNum):
                correct += 1

        accuracy = correct*100/runs
        return accuracy              


instance = mnistClass(784, 3, 16, 10, 2, 100)
#(input, layers, layer size, output, loops, sample subsize)

#input          - amount of nodes in input data
#layers         - amount of layers including last output layer but not first input layer
#layer size     - amount of nodes in hidden layers
#output         - amount of nodes in output layer
#loops          - how many times to train through the entire data set
#sample subsize - what quantity of data samples to average the gradient on
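
Since the post mentions experimenting with different activation functions, here is a minimal, standalone sketch of how a third option such as ReLU could be slotted into the same shiftType switch the class uses. This is not part of the original code: activation/activationPrime are hypothetical stand-ins for the sigmoid/sigmoidPrime pair above, and the ReLU branch is illustrative.

import numpy as np

# shiftType 0: standard logistic sigmoid, output in (0, 1)
# shiftType 1: rescaled to (-1, 1), algebraically equal to tanh(x/2)
# shiftType 2: (hypothetical) ReLU
def activation(x, shiftType):
    s = 1 / (1 + np.exp(-x))
    if shiftType == 0:
        return s
    elif shiftType == 1:
        return 2 * s - 1
    elif shiftType == 2:
        return np.maximum(0.0, x)

def activationPrime(x, shiftType):
    s = 1 / (1 + np.exp(-x))
    if shiftType == 0:
        return s * (1 - s)                  # same as sigmoid - sigmoid**2
    elif shiftType == 1:
        return 2 * s * (1 - s)              # same as 2*exp(-x)/(1+exp(-x))**2
    elif shiftType == 2:
        return (x > 0).astype(float)        # ReLU subgradient
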
Tags: python, neural-network, mnist
1 Answer

I'm always glad to hear about new faces in the field of ML (and DL in particular), and from what you describe you have already accomplished quite a lot, so first of all: hats off. Now, regarding your question, I suggest you take a step back and learn about the concepts of data exploration and feature extraction, and why they matter. The way I'd suggest doing that is by going through some of the Kaggle tutorials on machine learning and, from there, doing some basic classification on the datasets hosted there, such as the Titanic dataset, etc...

Take the "Intro to Machine Learning" course at https://www.kaggle.com/learn/overview.
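
For instance, a minimal sketch of that exploration-plus-feature-extraction workflow on the Titanic dataset might look like the following (assuming train.csv from the Kaggle Titanic competition has been downloaded locally; the feature choices are purely illustrative):

import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

# Load the Kaggle Titanic training data (download train.csv from
# https://www.kaggle.com/c/titanic first; the local path is an assumption).
df = pd.read_csv("train.csv")

# Basic data exploration: size, missing values, and how the survival
# rate varies with a single categorical feature.
print(df.shape)
print(df.isnull().sum())
print(df.groupby("Sex")["Survived"].mean())

# Minimal feature extraction: encode sex numerically and fill missing
# ages with the median before training anything.
df["Sex"] = (df["Sex"] == "female").astype(int)
df["Age"] = df["Age"].fillna(df["Age"].median())
features = df[["Pclass", "Sex", "Age", "Fare"]]
labels = df["Survived"]

X_train, X_test, y_train, y_test = train_test_split(
    features, labels, test_size=0.2, random_state=0)
model = RandomForestClassifier(random_state=0).fit(X_train, y_train)
print("Holdout accuracy:", accuracy_score(y_test, model.predict(X_test)))

The point here is less the classifier itself than the habit of inspecting the data and engineering features before training anything.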

Good luck!
