How do I use TensorFlow to make a neural network classify images?


So, I have already used TensorFlow to classify images into masks (segmentation), but when I try to generalize this to whole images rather than masks, I run into a lot of problems, for example tensor size mismatches, even though both kinds of images go through the same preprocessing pipeline.

How can I do this?

import numpy as np
import tensorflow as tf
from tensorflow import keras
import cv2
from scipy import io
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# Load the four photos and their masks, converting BGR (OpenCV default) to RGB
images = []
for i in range(1, 5):
    url = 'C:/dataset/photos/%04d.png' % i
    img = cv2.imread(url)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    images.append(tf.convert_to_tensor(img))

masks = []
for i in range(1, 5):
    url = 'C:/dataset/masks/%04d.png' % i
    mask = cv2.imread(url)
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
    masks.append(tf.convert_to_tensor(mask))

# DenseNet121 encoder with ImageNet weights; skip connections are tapped below
base = keras.applications.DenseNet121(input_shape=[128,128,3], include_top=False, weights='imagenet')
skip_names = ['conv1/relu', # size 64*64
              'pool2_relu',  # size 32*32
              'pool3_relu',  # size 16*16
              'pool4_relu',  # size 8*8
              'relu'        # size 4*4
              ]
skip_outputs = [base.get_layer(name).output for name in skip_names]

downstack = keras.Model(inputs=base.input, outputs=skip_outputs)
downstack.trainable = False

from tensorflow_examples.models.pix2pix import pix2pix
upstack = [pix2pix.upsample(512, 3),
           pix2pix.upsample(256, 3),
           pix2pix.upsample(128, 3),
           pix2pix.upsample(64, 3)]

inputs = keras.layers.Input(shape=[128,128,3])
down = downstack(inputs)
out = down[-1]
skips = reversed(down[:-1])
for up, skip in zip(upstack, skips):
    out = up(out)
    out = keras.layers.Concatenate()([out, skip])

# final transposed conv back to 128x128 with 3 output channels
out = keras.layers.Conv2DTranspose(3, 3, strides=2, padding='same')(out)

unet = keras.Model(inputs=inputs, outputs=out)
# Resize the images and masks
def resize_image(image):
    image = tf.cast(image, tf.float32)
    image = image / 255.0
    image = tf.image.resize(image, (128,128))
    image = tf.reshape(image, [128,128,3])
    return image

def resize_mask(mask):
    '''mask = tf.expand_dims(mask, axis=-1)
    mask = tf.image.resize(mask, (128,128))
    mask = tf.cast(mask, tf.uint8)
    return mask'''
    mask = tf.cast(mask, tf.float32)
    mask = mask / 255.0
    mask = tf.image.resize(mask, (128,128))
    mask = tf.reshape(mask, [128,128,3])
    return mask

X = [resize_image(i) for i in images]
y = [resize_mask(m) for m in masks]

train_X, val_X,train_y, val_y = train_test_split(X,y, test_size=0.2, random_state=0)
train_X = tf.data.Dataset.from_tensor_slices(train_X)
val_X = tf.data.Dataset.from_tensor_slices(val_X)
train_y = tf.data.Dataset.from_tensor_slices(train_y)
val_y = tf.data.Dataset.from_tensor_slices(val_y)
train_X.element_spec, train_y.element_spec, val_X.element_spec, val_y.element_spec

train = tf.data.Dataset.zip((train_X, train_y))
val = tf.data.Dataset.zip((val_X, val_y))

'''def brightness(img, mask):
    img = tf.image.adjust_brightness(img, 0.1)
    return img, mask

def gamma(img, mask):
    img = tf.image.adjust_gamma(img, 0.1)
    return img, mask

def hue(img, mask):
    img = tf.image.adjust_hue(img, -0.1)
    return img, mask
def crop(img, mask):
    img = tf.image.central_crop(img, 0.7)
    img = tf.image.resize(img, (128,128))
    mask = tf.image.central_crop(mask, 0.7)
    mask = tf.image.resize(mask, (128,128))
    #mask = tf.cast(mask, tf.uint8)
    return img, mask

def flip_hori(img, mask):
    img = tf.image.flip_left_right(img)
    mask = tf.image.flip_left_right(mask)
    return img, mask

def flip_vert(img, mask):
    img = tf.image.flip_up_down(img)
    mask = tf.image.flip_up_down(mask)
    return img, mask

def rotate(img, mask):
    img = tf.image.rot90(img)
    mask = tf.image.rot90(mask)
    return img, mask'''

'''a = train.map(brightness)
b = train.map(gamma)
c = train.map(hue)
d = train.map(crop)
e = train.map(flip_hori)
f = train.map(flip_vert)
g = train.map(rotate)'''
'''train = train.concatenate(a)
train = train.concatenate(b)
train = train.concatenate(c)
train = train.concatenate(d)
train = train.concatenate(e)
train = train.concatenate(f)
train = train.concatenate(g)'''

BATCH = 64
AT = tf.data.AUTOTUNE
BUFFER = 1000
STEPS_PER_EPOCH = 800//BATCH
VALIDATION_STEPS = 200//BATCH
train = train.cache().shuffle(BUFFER).batch(BATCH).repeat()
train = train.prefetch(buffer_size=AT)
val = val.batch(BATCH)

example = next(iter(train))
preds = unet(example[0])

pred_mask = tf.argmax(preds, axis=-1)
pred_mask = tf.expand_dims(pred_mask, -1)
plt.imshow(pred_mask[0])
plt.colorbar()

unet.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer=keras.optimizers.RMSprop(learning_rate=0.001), metrics=['accuracy'])

hist = unet.fit(train, validation_data=val, steps_per_epoch=STEPS_PER_EPOCH, validation_steps=VALIDATION_STEPS, epochs=25)

img, mask = next(iter(val))
pred = unet.predict(img)

plt.figure(figsize=(10,5))
for i in pred:
    plt.subplot(121)
    i = tf.argmax(i, axis=-1)
    plt.imshow(i,cmap='jet')
    plt.axis('off')
    plt.title('Prediction')
    break

plt.subplot(122)
plt.imshow(mask[0], cmap='jet')
plt.axis('off')
plt.title('Ground Truth')
plt.show()

# Plot training and validation accuracy over the epochs
history = hist.history
acc = history['accuracy']
val_acc = history['val_accuracy']
plt.plot(acc, '-', label='Training Accuracy')
plt.plot(val_acc, '--', label='Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

#unet.save("model.h5")

The error message I get is

Detected at node 'sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits' defined at (most recent call last):
    File "C:\Users\Fedor\OneDrive\Рабочий стол\Проект\Херня.py", line 176, in <module>
      hist = unet.fit(train, validation_data=val, steps_per_epoch=STEPS_PER_EPOCH, validation_steps=VALIDATION_STEPS, epochs=25);
    File "C:\Python\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
      return fn(*args, **kwargs)
    File "C:\Python\lib\site-packages\keras\engine\training.py", line 1564, in fit
      tmp_logs = self.train_function(iterator)
    File "C:\Python\lib\site-packages\keras\engine\training.py", line 1160, in train_function
      return step_function(self, iterator)
    File "C:\Python\lib\site-packages\keras\engine\training.py", line 1146, in step_function
      outputs = model.distribute_strategy.run(run_step, args=(data,))
    File "C:\Python\lib\site-packages\keras\engine\training.py", line 1135, in run_step
      outputs = model.train_step(data)
    File "C:\Python\lib\site-packages\keras\engine\training.py", line 994, in train_step
      loss = self.compute_loss(x, y, y_pred, sample_weight)
    File "C:\Python\lib\site-packages\keras\engine\training.py", line 1052, in compute_loss
      return self.compiled_loss(
    File "C:\Python\lib\site-packages\keras\engine\compile_utils.py", line 265, in __call__
      loss_value = loss_obj(y_t, y_p, sample_weight=sw)
    File "C:\Python\lib\site-packages\keras\losses.py", line 152, in __call__
      losses = call_fn(y_true, y_pred)
    File "C:\Python\lib\site-packages\keras\losses.py", line 272, in call
      return ag_fn(y_true, y_pred, **self._fn_kwargs)
    File "C:\Python\lib\site-packages\keras\losses.py", line 2084, in sparse_categorical_crossentropy
      return backend.sparse_categorical_crossentropy(
    File "C:\Python\lib\site-packages\keras\backend.py", line 5630, in sparse_categorical_crossentropy
      res = tf.nn.sparse_softmax_cross_entropy_with_logits(
Node: 'sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits'
logits and labels must have the same first dimension, got logits shape [49152,3] and labels shape [147456]
     [[{{node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits}}]] [Op:__inference_train_function_23362]
1 Answer

I think this is because the output layer of the model you built does not match the dimensions of your labels. The traceback shows it directly: the logits come out as [49152, 3], i.e. 3 images × 128 × 128 pixels with 3 class scores each, while the labels arrive as a flat [147456], the same pixels but with all 3 mask channels still attached. Either redefine the output layer and the mask preprocessing so the two match, or define your own network and train it yourself. I would recommend the second option, so that you fully understand what is going on; it will also make it easier to fit the model to your own data and get better performance. A sketch of each option follows.
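For the first option, here is a minimal sketch of the label-side fix. It assumes your mask PNGs encode each pixel's class id in a single channel (that assumption is mine, not from your post): keep one channel, resize with nearest-neighbour so class ids are not blended, and feed integer labels of shape [128, 128, 1] so they line up with the [128, 128, 3] logits.

# Hedged sketch, not your original code: masks as integer class ids per pixel.
def resize_mask_sparse(mask):
    mask = mask[..., :1]                                        # assume the class id lives in the first channel
    mask = tf.image.resize(mask, (128, 128), method='nearest')  # nearest-neighbour keeps ids intact
    return tf.cast(mask, tf.uint8)                              # shape [128, 128, 1], what sparse CE expects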

This example may help you: https://towardsdatascience.com/explaining-face-mask-image-classification-model-using-lime-8f423c601ff9
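And if what you ultimately want is classifying whole images (one label per image, as in your title) rather than per-pixel masks, a minimal stand-alone classifier could look like the sketch below. NUM_CLASSES and the layer sizes are placeholders I chose, not values from your post; the point is that the labels become one integer per image, so the shape mismatch from your traceback cannot occur.

# Hedged sketch of a small image classifier; NUM_CLASSES is an assumption.
from tensorflow import keras

NUM_CLASSES = 3

clf = keras.Sequential([
    keras.layers.Input(shape=(128, 128, 3)),
    keras.layers.Conv2D(32, 3, activation='relu'),
    keras.layers.MaxPooling2D(),
    keras.layers.Conv2D(64, 3, activation='relu'),
    keras.layers.MaxPooling2D(),
    keras.layers.GlobalAveragePooling2D(),
    keras.layers.Dense(NUM_CLASSES)          # logits: one score per class, per image
])
clf.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
            optimizer=keras.optimizers.Adam(1e-3),
            metrics=['accuracy'])
# labels here are one integer per image, e.g. [0, 2, 1], matching logits of shape [batch, NUM_CLASSES]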
