How can I make my logistic regression faster?


I have to implement a simple logistic regression (in numpy only; I cannot use PyTorch or TensorFlow).

Data: a subset of MNIST

Goal: I am supposed to reach 86% accuracy.

Unfortunately, I only get about 70%, and my loss function oscillates strangely.

The bug must be in one of the functions np_cross_entropy or np_cross_entropy_grad.

Of course, I have tried changing the learning rate, but without any satisfactory results.

Can you help? (Code and plots below.)

I am only allowed to change the functions np_linear, np_softmax, np_cross_entropy and np_cross_entropy_grad (and, at most, the forward function in the NumpyLogisticRegression class).

1. Loading a subset of MNIST

# Import MNIST dataset

import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

%matplotlib inline

def load_dataset(dataset_name):
    data = np.load('data/{}/{}.npz'.format(dataset_name.upper(), dataset_name))
    return data['X_train'], data['y_train'], data['X_test'], data['y_test']

X_train, y_train, X_test, y_test = load_dataset('mini_mnist')

f, ax = plt.subplots(1, 10, sharex='col', sharey='row', figsize=(18, 16))
for a in ax:
    a.imshow(X_train[np.random.randint(X_train.shape[0])].reshape(28, 28), cmap='gray')
plt.show()

X_train = np.c_[np.ones(X_train.shape[0]), X_train]
X_test = np.c_[np.ones(X_test.shape[0]), X_test]
print("train data shape: {}, test data shape: 
{}".format(X_train.shape, X_test.shape))

2. Main class and functions

def np_linear(x, a):
    '''
    Calculate l(x;a) in BxK

    :param x: Bx(D+1) input data
    :param a: Kx(D+1) weight matrix
    '''
    return np.dot(x, a.transpose())

def np_softmax(l):
    '''
    Calculate p(l) in BxK

    :param l: BxK logits
    '''
    exps = np.exp(l - np.max(l))
    return exps / np.sum(exps)

def np_cross_entropy(p, y):
    '''
    Calculate L(p,y)

    :param p: BxK predictions
    :param y: B true labels
    '''
    m = y.shape[0]
    log_likelihood = -np.log(p[range(m), y])
    loss = np.sum(log_likelihood) / m
    return loss

def np_cross_entropy_grad(p, y, x):
    '''
    Calculate dL/da in Kx(D+1)

    :param p: BxK predictions
    :param y: B true labels
    :param x: Bx(D+1) input data
    '''
    m = y.shape[0]
    grad = p  # note: this aliases p, so p is modified in place below
    grad[range(m), y] -= 1
    grad = grad / m
    grad = grad.transpose()
    return np.dot(grad, x)
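
One way to test np_cross_entropy_grad in isolation is a central finite-difference comparison against the full forward pass. A minimal sketch, assuming the four functions above are in scope; numerical_grad and the toy shapes (B=4, K=3, D+1=6) are made up for illustration:

def numerical_grad(x, y, a, eps=1e-5):
    # Central finite difference of the full loss w.r.t. each weight.
    grad = np.zeros_like(a)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            a_plus, a_minus = a.copy(), a.copy()
            a_plus[i, j] += eps
            a_minus[i, j] -= eps
            l_plus = np_cross_entropy(np_softmax(np_linear(x, a_plus)), y)
            l_minus = np_cross_entropy(np_softmax(np_linear(x, a_minus)), y)
            grad[i, j] = (l_plus - l_minus) / (2 * eps)
    return grad

rng = np.random.RandomState(0)
x = rng.randn(4, 6)            # B=4 samples, D+1=6 features
y = rng.randint(0, 3, size=4)  # K=3 classes
a = rng.randn(3, 6) * 0.1

p = np_softmax(np_linear(x, a))
analytic = np_cross_entropy_grad(p.copy(), y, x)  # copy, since the function modifies p
print(np.max(np.abs(analytic - numerical_grad(x, y, a))))

With the functions exactly as posted, this check should report a noticeable discrepancy, confirming that at least one of them is inconsistent with the others.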

class NumpyLogisticRegression:

    def __init__(self, n_classes, n_epochs, input_size, learning_rate=0.1, batch_size=256):
        self.A = np.zeros((n_classes, input_size))
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.input_size = input_size
        self.n_classes = n_classes
        self.n_epochs = n_epochs

    def forward(self, x):
        return np_softmax(np_linear(x, self.A))

    def train(self, X, Y, X_test=None, y_test=None):
        loss, train_accuracy, test_accuracy = [], [], []

        for e in tqdm(range(self.n_epochs)):

            perm = np.random.permutation(len(X))
            X, Y = X[perm], Y[perm]

            for batch in range(len(X) // self.batch_size):
                x = X[batch * self.batch_size:(batch + 1) * self.batch_size]
                y = Y[batch * self.batch_size:(batch + 1) * self.batch_size]

                p = self.forward(x)
                l = np_cross_entropy(p, y)

                loss.append(l)
                train_accuracy.append(self.test(x, y))
                if X_test is not None and y_test is not None:
                    test_accuracy.append(self.test(X_test, y_test))

                grad_A = np_cross_entropy_grad(p, y, x)
                self.A -= grad_A * self.learning_rate

        return loss, train_accuracy, test_accuracy

    def test(self, X, Y):
        p = np.argmax(self.forward(X), axis=1)
        return np.mean(p == Y)

3. Testing

clf = NumpyLogisticRegression(n_classes=10, n_epochs=10, input_size=785)
loss, train_accuracy, test_accuracy = clf.train(X_train, y_train, X_test, y_test)

4. Plots (no code, only results)

As you can see, something is wrong: the loss oscillates instead of decreasing smoothly.
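
The original plot images are not reproduced here. A minimal sketch of how such curves can be redrawn from the values returned by train; the figure layout is an assumption, not code from the post:

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.plot(loss)
ax1.set_xlabel('batch')
ax1.set_ylabel('cross-entropy loss')
ax2.plot(train_accuracy, label='train')
ax2.plot(test_accuracy, label='test')
ax2.set_xlabel('batch')
ax2.set_ylabel('accuracy')
ax2.legend()
plt.show()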

numpy neural-network logistic-regression mnist cross-entropy
1 Answer

The problem is in the np_softmax function; it should look like this:

def np_softmax(l):
    exps = np.exp(l - np.max(l))
    return exps / np.sum(exps, axis=1).reshape(-1,1)

I had written it for a single-vector argument; this is the correct version for matrix input. Without the axis=1 sum, the probabilities were normalized over the entire BxK batch, so each row no longer summed to one and the gradient p - onehot(y) was mis-scaled, which is what made the loss oscillate.
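
For extra robustness one can also subtract each row's own max instead of the global max; an equivalent variant using keepdims, my sketch rather than the answerer's code:

def np_softmax(l):
    # Stabilize each row independently, then normalize so that
    # every row's K class probabilities sum to 1.
    exps = np.exp(l - np.max(l, axis=1, keepdims=True))
    return exps / np.sum(exps, axis=1, keepdims=True)

Both versions give the same probabilities; the per-row max just keeps the stabilization independent of the other examples in the batch.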
