Adding image augmentation to CNN code to improve accuracy

Question · 0 votes · 1 answer

My experiment classifies toads and snakes with a CNN. I collected about 4000 images per species from the internet, and my test accuracy reaches 78% to 80%. (I modified the original code to add test data for a confusion matrix.) Now I want to add standard data augmentation, but I don't know how to work it into the existing code properly. How can I add the augmentation effectively?

from keras.preprocessing.image import ImageDataGenerator

# Image Generator
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')

test_datagen = ImageDataGenerator(rescale=1. / 255)

X_train = train_datagen.flow_from_directory(DATADIR,
                                            target_size=(IMG_SIZE, IMG_SIZE),
                                            batch_size=32,
                                            class_mode='categorical')

X_test = test_datagen.flow_from_directory(test,
                                          target_size=(IMG_SIZE, IMG_SIZE),
                                          batch_size=32,
                                          class_mode='categorical')
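For reference, here is one way these generators could be wired into the model defined in the existing code below. This is only a sketch, with a few assumptions: the generators are renamed so they do not overwrite the X_train/X_test numpy arrays built later, class_mode is switched to 'binary' and color_mode to 'grayscale' to match the Dense(1)/sigmoid model that expects single-channel input, and model.fit accepts generators (on older Keras versions model.fit_generator would be used instead).

# Sketch: train straight from the augmented directory generators
train_generator = train_datagen.flow_from_directory(DATADIR,
                                                    target_size=(IMG_SIZE, IMG_SIZE),
                                                    color_mode='grayscale',   # model expects 1 channel
                                                    batch_size=32,
                                                    class_mode='binary')      # matches Dense(1) + sigmoid

test_generator = test_datagen.flow_from_directory(test,
                                                  target_size=(IMG_SIZE, IMG_SIZE),
                                                  color_mode='grayscale',
                                                  batch_size=32,
                                                  class_mode='binary')

history = model.fit(train_generator,
                    epochs=50,
                    validation_data=test_generator)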

                *****existing CNN code*****

DATADIR= "C:\\Users\sazi\Desktop\snake&toad"
test="C:\\Users\sazi\Desktop\test"

CATEGORIES = ["snake", "toad"]
test_categories=["snake_test", "toad_test"]

IMG_SIZE = 60
training_data = []

#for training 
def create_training_data():
    for category in CATEGORIES:  # do toads and snakes
        path = os.path.join(DATADIR,category)  # create path to  toads and snakes
        class_num = CATEGORIES.index(category)  # get the classification (0 or 1). 0=snake 1=toad
        print (class_num)
        for img in tqdm(os.listdir(path)):  # iterate over each image per toads and snakes
            try:
                img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)  
                new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))                 
                training_data.append([new_array, class_num])  # add this to our training_data
            except Exception as e:  # in the interest of keeping the output clean...
                pass

create_training_data()

#print(len(training_data))
#print(len(test))
#for testing... not sure whether I need this part or not
test_data = []

def create_test_data():
    for category in test_categories:  # do toads and snakes

        path = os.path.join(test,category)  # create path to toads and snakes
        class_num = test_categories.index(category)  #classification  (0 or a 1). 0=snake 1=toad

        for img in tqdm(os.listdir(path)):  # iterate over each image per toads and snakes
            try:
                img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) 
                test_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                test_data.append([test_array, class_num])  # add this to our test_data
            except Exception as e:  # in the interest of keeping the output clean...
                pass

create_test_data()
#print(len(test_data))

import random

random.shuffle(training_data)
for sample in training_data[:5]:
    print(sample[1])

X_train = []
y_train = []

for features,label in training_data:
    X_train.append(features)
    y_train.append(label)

print(X_train[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))

X_train = np.array(X_train).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y_train = np.array(y_train)

X_train = X_train/255.0

X_test = []
y_test = []

for features,label in test_data:
    X_test.append(features)
    y_test.append(label)

print(X_test[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))

X_test = np.array(X_test).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y_test = np.array(y_test)
X_test = X_test/255.0

model = Sequential()

model.add(Conv2D(32, (3, 3), input_shape=(IMG_SIZE, IMG_SIZE, 1)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors

model.add(Dropout(0.5))

model.add(Dense(512))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['acc'])

history=model.fit(X_train, y_train, batch_size=32, epochs=50, validation_split=0.3)
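Alternatively, the augmentation can be applied to the in-memory arrays built above by using ImageDataGenerator.flow instead of flow_from_directory. A minimal sketch under a few assumptions: the rescale argument is dropped because X_train was already divided by 255 above, the held-out test arrays are used for validation because validation_split cannot be combined with a generator, and model.fit accepts generators (older Keras versions would need model.fit_generator).

from keras.preprocessing.image import ImageDataGenerator

# Augment the already-loaded numpy arrays on the fly during training.
datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

history = model.fit(datagen.flow(X_train, y_train, batch_size=32),
                    epochs=50,
                    validation_data=(X_test, y_test))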
Tags: python, machine-learning, image-processing, keras, classification
1 Answer
0 votes

Post the complete error you are getting; that would make it much easier to help you. (I can't comment.)
