Problem with a CNN on MNIST

Problem description (votes: 0, answers: 1)

I want to know how a CNN's hyperparameters affect its accuracy, so I wrote this code:


from numpy.random import seed
seed(1)
import tensorflow 
tensorflow.random.set_seed(2)
from keras import layers
from keras import models
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.utils import to_categorical
done=[[0 for i in range(7)] for j in range(100)]
done[0][0]='filter1'
done[0][1]='filter2'
done[0][2]='noyeau1'
done[0][3]='noyeau2'
done[0][4]='epochess'
done[0][5]='batch'
done[0][6]='precision'
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
y0=1
batch=[32,64,128]
epochss=[4,10]
noyeau1=[3,5,7]
noyeau2=[3,5,7]
filterr1=[32,64,128]
filterr2=[32,64,128]
y0=1
for i in range(3):
 for j in range(3):
    model = models.Sequential()
    model.add(layers.Conv2D(filterr1[i], (noyeau1[i], noyeau1[i]), activation='relu', input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(filterr2[j], (noyeau2[j],noyeau2[j]), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(filterr2[j], (noyeau2[j],noyeau2[j]), activation='relu'))
    model.summary()
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))  
    model.summary()
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    for s in range(2):
        for l in range(3):        
            history =model.fit(train_images, train_labels, epochs=epochss[s], batch_size=batch[l])
            test_loss, test_acc = model.evaluate(test_images, test_labels)
            print('test_acc',test_acc)
            done[y0][0]=filterr1[i]
            done[y0][1]=filterr2[j]
            done[y0][2]=noyeau1[i]
            done[y0][3]=noyeau2[j]
            done[y0][4]=epochss[s]
            done[y0][5]=batch[l]
            done[y0][6]=test_acc
            y0=y0+1

But when I run it, I get this error:

ValueError: Negative dimension size caused by subtracting 5 from 3 for 'conv2d_135/convolution' (op: 'Conv2D') with input shapes: [?,3,3,32], [5,5,32,32].

I don't understand why; it seems the second hidden layer does not accept (5, 5) as a kernel size. Thanks.

keras-layer mnist cnn
1 Answer

0 votes
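The negative dimension error comes from the feature maps shrinking below the kernel size. With the default padding='valid', every Conv2D layer removes kernel_size - 1 pixels from each spatial dimension and every 2x2 MaxPooling2D halves it, so after a couple of blocks a 28x28 image can end up smaller than the next kernel. A minimal sketch of that arithmetic (the kernel sizes below are illustrative, not read from the exact failing run):

    # trace the spatial size of the feature map through the network
    def conv_out(size, kernel):          # Conv2D with padding='valid'
        return size - kernel + 1

    def pool_out(size, pool=2):          # MaxPooling2D((2, 2))
        return size // pool

    size = 28                            # MNIST images are 28x28
    size = pool_out(conv_out(size, 7))   # 7x7 kernel -> 22, pooling -> 11
    size = pool_out(conv_out(size, 5))   # 5x5 kernel -> 7,  pooling -> 3
    # one more 5x5 convolution would need 3 - 5 + 1 = -1 output positions,
    # which is exactly the "Negative dimension size" ValueError
    print(size)                          # 3

Using padding='same', smaller kernels, or fewer pooling stages keeps the maps large enough for every combination in the loops. For exploring the hyperparameters themselves, a scikit-learn grid search is usually more convenient than hand-written loops: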
##import required packages

    import tensorflow as tf
    import keras
    from keras import layers
    from keras import models
    from keras import utils
    from keras.layers import Dense
    from keras.models import Sequential
    from keras.layers import Flatten
    from keras.layers import Dropout
    from keras.layers import Activation
    from keras.regularizers import l2
    from keras.optimizers import SGD
    from keras.optimizers import RMSprop
    from keras import datasets

    from keras.callbacks import LearningRateScheduler
    from keras.callbacks import History

    from keras import losses
    from sklearn.utils import shuffle
    import numpy as np

    print(tf.__version__)
    print(tf.keras.__version__)
##loading the mnist data set

    mnist = keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    print(x_train.shape, y_train.shape)
    print(f'We have {x_train.shape[0]} train samples')
    print(f'We have {x_test.shape[0]} test samples')

    # flatten the 28x28 images and scale them to [0, 1] so they match the
    # Dense(input_dim=...) network defined below
    x_train = x_train.reshape((x_train.shape[0], -1)).astype('float32') / 255
    x_test = x_test.reshape((x_test.shape[0], -1)).astype('float32') / 255

    input_dim = x_train.shape[1]   # 784
    num_classes = 10
# Convert class vectors to binary class matrices

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    from sklearn.model_selection import GridSearchCV
# let's create a function that creates the model (required for KerasClassifier) 
# while accepting the hyperparameters we want to tune 
# we also pass some default values such as optimizer='rmsprop'

    def create_model_2(optimizer='rmsprop', init='glorot_uniform'):
        model = Sequential()
        model.add(Dense(64, input_dim=input_dim, kernel_initializer=init, activation='relu'))
        model.add(Dropout(0.1))
        model.add(Dense(64, kernel_initializer=init, activation=tf.nn.relu))
        model.add(Dense(num_classes, kernel_initializer=init, activation=tf.nn.softmax))

        # compile model

        model.compile(loss='categorical_crossentropy', 
                      optimizer=optimizer, 
                      metrics=['accuracy'])

        return model
    # %%time  (Jupyter cell magic in the original notebook; has no effect in a plain script)

# fix random seed for reproducibility (this might or might not work,
# depending on each library's implementation)

    seed = 7
    np.random.seed(seed)

    # create the sklearn wrapper for the network
    from keras.wrappers.scikit_learn import KerasClassifier
    model_init_batch_epoch_CV = KerasClassifier(build_fn=create_model_2, verbose=1)

    # we choose the initializers that came at the top in our previous cross-validation!!
    init_mode = ['glorot_uniform', 'uniform'] 
    batches = [128, 512]
    epochs = [10, 20]

    # grid search for initializer, batch size and number of epochs
    param_grid = dict(epochs=epochs, batch_size=batches, init=init_mode)
    grid = GridSearchCV(estimator=model_init_batch_epoch_CV, 
                        param_grid=param_grid,
                        cv=3)
    grid_result = grid.fit(x_train, y_train)

# print results

    print(f'Best Accuracy for {grid_result.best_score_:.4} using {grid_result.best_params_}')
    means = grid_result.cv_results_['mean_test_score']
    stds = grid_result.cv_results_['std_test_score']
    params = grid_result.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print(f'mean={mean:.4}, std={stdev:.4} using {param}')
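The same KerasClassifier/GridSearchCV pattern can also be applied to the convolutional hyperparameters from the question (filter counts and kernel sizes). The sketch below is only an illustration of that idea: the helper name create_cnn, the search grid, and the use of the standalone keras wrapper are my own choices, not part of the answer above.

    import numpy as np
    from keras import layers, models
    from keras.datasets import mnist
    from keras.wrappers.scikit_learn import KerasClassifier
    from sklearn.model_selection import GridSearchCV

    (x_train, y_train), _ = mnist.load_data()
    x_train = x_train.reshape((-1, 28, 28, 1)).astype('float32') / 255

    def create_cnn(filters1=32, filters2=64, kernel1=3, kernel2=3):
        model = models.Sequential()
        model.add(layers.Conv2D(filters1, (kernel1, kernel1), activation='relu',
                                padding='same', input_shape=(28, 28, 1)))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(filters2, (kernel2, kernel2), activation='relu',
                                padding='same'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Flatten())
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(10, activation='softmax'))
        # integer labels, so the sparse variant of the loss is used
        model.compile(optimizer='rmsprop',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    cnn = KerasClassifier(build_fn=create_cnn, epochs=4, batch_size=128, verbose=1)
    param_grid = dict(filters1=[32, 64], kernel1=[3, 5], kernel2=[3, 5])
    grid = GridSearchCV(estimator=cnn, param_grid=param_grid, cv=3)
    # a 10k-sample subset keeps the search fast; drop the slicing for the full set
    grid_result = grid.fit(x_train[:10000], y_train[:10000])
    print(grid_result.best_score_, grid_result.best_params_)

Because every convolution here uses padding='same', all kernel-size combinations stay valid, which also avoids the negative dimension error from the question.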