sklearn Logistic Regression "ValueError: Found array with dim 3. Estimator expected <= 2."

Problem description

I designed a CNN autoencoder that compresses an image into a four-dimensional vector (named flatten), and I then want to visualize the result with PCA.

Below is my model:

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0, one_hot=True)
logs_path = "./log2/noiseRemoval"


inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')

### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='VALID', activation=tf.nn.relu, name='conv1')
# Now 26x26x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='VALID')
# Now 13x13x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='conv2')
# Now 11x11x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='VALID')
# Now 5x5x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='conv3')
# Now 3x3x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='SAME')
# Now 2x2x8
feature_map=tf.layers.conv2d(encoded, 1, (3,3), padding='SAME', activation=tf.nn.relu, name='feature_map')
#Now 2x2x1

###########For PCA Visualize
flatten = tf.reshape(feature_map, [-1, 4], name='flatten')
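# feature_map is (batch, 2, 2, 1), so flatten is a (batch, 4) matrix:
# one 4-dimensional code per input image, ready for PCA later on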



### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (4,4))
# 4x4x8
conv4 = tf.layers.conv2d_transpose(upsample1, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='trans1')
# 6x6x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (11,11))
# 11x11x8
conv5 = tf.layers.conv2d_transpose(upsample2, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='trans2')
# 13x13x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (26,26))
# 26x26x8
conv6 = tf.layers.conv2d_transpose(upsample3, 16, (3,3), padding='VALID', activation=tf.nn.relu, name='trans3')
# 28x28x16

logits = tf.layers.conv2d_transpose(conv6, 1, (3,3), padding='SAME', activation=None, name='logits')
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')

#############################################################
#decoder2(resize)


upsample1_re = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4_re = tf.layers.conv2d(upsample1_re, 8, (3,3), padding='same', activation=tf.nn.relu, name='conv4_re')
# Now 7x7x8
upsample2_re = tf.image.resize_nearest_neighbor(conv4_re, (14,14))
# Now 14x14x8
conv5_re = tf.layers.conv2d(upsample2_re, 8, (3,3), padding='same', activation=tf.nn.relu, name='conv5_re')
# Now 14x14x8
upsample3_re = tf.image.resize_nearest_neighbor(conv5_re, (28,28))
# Now 28x28x8
conv6_re = tf.layers.conv2d(upsample3_re, 16, (3,3), padding='same', activation=tf.nn.relu, name='conv6_re')
# Now 28x28x16

logits_re = tf.layers.conv2d(conv6_re, 1, (3,3), padding='same', activation=None, name='logits_re')
#Now 28x28x1

decoded_re = tf.nn.sigmoid(logits_re, name='decoded_re')


####Optmizer
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)

loss_re=tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits_re)
cost_re=tf.reduce_mean(loss_re)

opt = tf.train.AdamOptimizer(0.001).minimize(cost)
opt_re = tf.train.AdamOptimizer(0.001).minimize(cost_re)


# Add a few input, target and reconstructed images to the TensorBoard summaries
tf.summary.image('inputs', tf.reshape(inputs_, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.image('targets', tf.reshape(targets_, (-1, 28, 28, 1)), max_outputs=4)

tf.summary.image('decoded', tf.reshape(decoded, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.scalar('cost',cost)

tf.summary.image('decoded_re', tf.reshape(decoded_re, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.scalar('cost_re',cost_re)
merged = tf.summary.merge_all()


#############Train###################
sess = tf.Session()
epochs = 1
batch_size = 200
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter(logs_path, sess.graph)
for epoch in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        imgs = batch[0].reshape((-1, 28, 28, 1))
        batch_cost, _, batch_cost_re, _, summary = sess.run(
            [cost, opt, cost_re, opt_re, merged],
            feed_dict={inputs_: imgs, targets_: imgs})
        train_writer.add_summary(summary,epoch)

        print("Epoch: {}/{}...".format(epoch+1, epochs),
              "Training loss: {:.4f}".format(batch_cost),
              "Training loss_re: {:.4f}".format(batch_cost_re) )
    img2=mnist.train.images[0].reshape((-1, 28, 28, 1))
    code=sess.run([flatten],feed_dict={inputs_:img2})
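    # note: wrapping flatten in a list makes sess.run return a Python list
    # holding a single (1, 4) array rather than a plain 2-D array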
sess.close()

After training my model, I wanted to use the PCA package, but I got an error.

####Visualize by PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X = pca.fit_transform(code)
Y = np.argmax(mnist.train.labels, axis=1)

# plot
plt.figure(figsize=(10, 8))
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.colorbar()
plt.show()

This is the code I tried, and the error it gives me:

ValueError: Found array with dim 3. Estimator expected <= 2.
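The likely cause: sess.run([flatten], ...) returns a Python list containing one (1, 4) array, so when scikit-learn converts code to an array it sees shape (1, 1, 4), which has three dimensions. Below is a minimal sketch of one possible fix, assuming the session is still open (i.e. it runs before sess.close()); the batch of 1000 training images is just an illustrative choice.

imgs_all = mnist.train.images[:1000].reshape((-1, 28, 28, 1))  # encode a batch, not a single image
code = sess.run(flatten, feed_dict={inputs_: imgs_all})        # plain 2-D array of shape (1000, 4)

from sklearn.decomposition import PCA
X = PCA(n_components=2).fit_transform(code)                    # input is 2-D, so no ValueError
Y = np.argmax(mnist.train.labels[:1000], axis=1)               # labels for the same 1000 images

plt.figure(figsize=(10, 8))
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.colorbar()
plt.show()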

Edit: I have solved the problem and am posting my code below for anyone who runs into a similar issue.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
logs_path = "./log2/noiseRemoval"


inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')

### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='VALID', activation=tf.nn.relu, name='conv1')
# Now 26x26x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='SAME')
# Now 13x13x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='conv2')
# Now 11x11x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='SAME')
# Now 6x6x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='conv3')
# Now 4x4x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='SAME')
# Now 2x2x8

### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (4,4))
# 4x4x8
conv4 = tf.layers.conv2d_transpose(upsample1, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='trans1')
# 6x6x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (11,11))
# 11x11x8
conv5 = tf.layers.conv2d_transpose(upsample2, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='trans2')
# 13x13x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (26,26))
# 26x26x8
conv6 = tf.layers.conv2d_transpose(upsample3, 16, (3,3), padding='VALID', activation=tf.nn.relu, name='trans3')
# 28x28x16

logits = tf.layers.conv2d_transpose(conv6, 1, (3,3), padding='SAME', activation=None, name='logits')
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')

#############################################################
#decoder2(resize)


upsample1_re = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4_re = tf.layers.conv2d(upsample1_re, 8, (3,3), padding='same', activation=tf.nn.relu, name='conv4_re')
# Now 7x7x8
upsample2_re = tf.image.resize_nearest_neighbor(conv4_re, (14,14))
# Now 14x14x8
conv5_re = tf.layers.conv2d(upsample2_re, 8, (3,3), padding='same', activation=tf.nn.relu, name='conv5_re')
# Now 14x14x8
upsample3_re = tf.image.resize_nearest_neighbor(conv5_re, (28,28))
# Now 28x28x8
conv6_re = tf.layers.conv2d(upsample3_re, 16, (3,3), padding='same', activation=tf.nn.relu, name='conv6_re')
# Now 28x28x16

logits_re = tf.layers.conv2d(conv6_re, 1, (3,3), padding='same', activation=None, name='logits_re')
#Now 28x28x1

decoded_re = tf.nn.sigmoid(logits_re, name='decoded_re')





loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)

loss_re=tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits_re)
cost_re=tf.reduce_mean(loss_re)

opt = tf.train.AdamOptimizer(0.001).minimize(cost)
opt_re = tf.train.AdamOptimizer(0.001).minimize(cost_re)


# Add feature maps and a few input, target and reconstructed images to the TensorBoard summaries
tf.summary.image('feature_map', tf.reshape(conv2, (-1, 11, 11, 1)), max_outputs=12)
tf.summary.image('feature_map2', tf.reshape(conv3, (-1, 4, 4, 1)), max_outputs=12)

tf.summary.image('inputs', tf.reshape(inputs_, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.image('targets', tf.reshape(targets_, (-1, 28, 28, 1)), max_outputs=4)

tf.summary.image('decoded', tf.reshape(decoded, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.scalar('cost',cost)

tf.summary.image('decoded_re', tf.reshape(decoded_re, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.scalar('cost_re',cost_re)
merged = tf.summary.merge_all()


#############Train###################
sess = tf.Session()
epochs = 50
batch_size = 200
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter(logs_path, sess.graph)
for epoch in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        imgs = batch[0].reshape((-1, 28, 28, 1))
        batch_cost, _, batch_cost_re, _, summary = sess.run(
            [cost, opt, cost_re, opt_re, merged],
            feed_dict={inputs_: imgs, targets_: imgs})
        train_writer.add_summary(summary,epoch)

        print("Epoch: {}/{}...".format(epoch+1, epochs),
              "Training loss: {:.4f}".format(batch_cost),
              "Training loss_re: {:.4f}".format(batch_cost_re) )



sess.close()
1 Answer

@刘书宏, thank you very much for the solution. For the benefit of the community, I am posting your solution here in the answer section.

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
logs_path = "./log2/noiseRemoval"


inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')

### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='VALID', activation=tf.nn.relu, name='conv1')
# Now 26x26x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='SAME')
# Now 13x13x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='conv2')
# Now 11x11x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='SAME')
# Now 6x6x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='conv3')
# Now 4x4x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='SAME')
# Now 2x2x8

### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (4,4))
# 4x4x8
conv4 = tf.layers.conv2d_transpose(upsample1, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='trans1')
# 6x6x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (11,11))
# 11x11x8
conv5 = tf.layers.conv2d_transpose(upsample2, 8, (3,3), padding='VALID', activation=tf.nn.relu, name='trans2')
# 13x13x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (26,26))
# 26x26x8
conv6 = tf.layers.conv2d_transpose(upsample3, 16, (3,3), padding='VALID', activation=tf.nn.relu, name='trans3')
# 28x28x16

logits = tf.layers.conv2d_transpose(conv6, 1, (3,3), padding='SAME', activation=None, name='logits')
#Now 28x28x1
decoded = tf.nn.sigmoid(logits, name='decoded')

#############################################################
#decoder2(resize)


upsample1_re = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4_re = tf.layers.conv2d(upsample1_re, 8, (3,3), padding='same', activation=tf.nn.relu, name='conv4_re')
# Now 7x7x8
upsample2_re = tf.image.resize_nearest_neighbor(conv4_re, (14,14))
# Now 14x14x8
conv5_re = tf.layers.conv2d(upsample2_re, 8, (3,3), padding='same', activation=tf.nn.relu, name='conv5_re')
# Now 14x14x8
upsample3_re = tf.image.resize_nearest_neighbor(conv5_re, (28,28))
# Now 28x28x8
conv6_re = tf.layers.conv2d(upsample3_re, 16, (3,3), padding='same', activation=tf.nn.relu, name='conv6_re')
# Now 28x28x16

logits_re = tf.layers.conv2d(conv6_re, 1, (3,3), padding='same', activation=None, name='logits_re')
#Now 28x28x1

decoded_re = tf.nn.sigmoid(logits_re, name='decoded_re')





loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
cost = tf.reduce_mean(loss)

loss_re=tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits_re)
cost_re=tf.reduce_mean(loss_re)

opt = tf.train.AdamOptimizer(0.001).minimize(cost)
opt_re = tf.train.AdamOptimizer(0.001).minimize(cost_re)


# Add feature maps and a few input, target and reconstructed images to the TensorBoard summaries
tf.summary.image('feature_map', tf.reshape(conv2, (-1, 11, 11, 1)), max_outputs=12)
tf.summary.image('feature_map2', tf.reshape(conv3, (-1, 4, 4, 1)), max_outputs=12)

tf.summary.image('inputs', tf.reshape(inputs_, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.image('targets', tf.reshape(targets_, (-1, 28, 28, 1)), max_outputs=4)

tf.summary.image('decoded', tf.reshape(decoded, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.scalar('cost',cost)

tf.summary.image('decoded_re', tf.reshape(decoded_re, (-1, 28, 28, 1)), max_outputs=4)
tf.summary.scalar('cost_re',cost_re)
merged = tf.summary.merge_all()


#############Train###################
sess = tf.Session()
epochs = 50
batch_size = 200
sess.run(tf.global_variables_initializer())
train_writer = tf.summary.FileWriter(logs_path, sess.graph)
for epoch in range(epochs):
    for ii in range(mnist.train.num_examples//batch_size):
        batch = mnist.train.next_batch(batch_size)
        imgs = batch[0].reshape((-1, 28, 28, 1))
        batch_cost, _, batch_cost_re, _, summary = sess.run(
            [cost, opt, cost_re, opt_re, merged],
            feed_dict={inputs_: imgs, targets_: imgs})
        train_writer.add_summary(summary,epoch)

        print("Epoch: {}/{}...".format(epoch+1, epochs),
              "Training loss: {:.4f}".format(batch_cost),
              "Training loss_re: {:.4f}".format(batch_cost_re) )



sess.close()