如何在Tensorflow中获取张量的值?

问题描述 投票:2回答:1

我正在训练在医学数据(图像)上执行CNN算法,我需要恢复最后一层的张量值来进行其他计算。

def _create_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1):
    """Build a 3D V-Net-style encoder/decoder CNN graph and return its output tensor.

    Args:
        X: flat input tensor, reshaped to (batch, image_z, image_width, image_height, image_channel).
        image_z, image_width, image_height, image_channel: input volume dimensions.
        phase: training-phase flag forwarded to the batch-norm helpers.
        drop: dropout keep/rate value forwarded to the conv helpers.
        n_class: number of output channels of the final 1x1x1 sigmoid conv (default 1).

    Returns:
        output_map: the final sigmoid activation tensor (same spatial shape as the input,
        n_class channels).

    NOTE(review): this function only BUILDS the graph. Do not call `.eval()` or create a
    session here — the variables are not initialized yet, which raises
    FailedPreconditionError (the original bug). To inspect the values, run in the caller:

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            values = sess.run(output_map, feed_dict={X: batch, ...})
    """
    inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel])

    # ---- Encoder: conv stages with residual adds, each followed by a down-sampling ----
    # Stage 1 (16 channels)
    layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop,
                               scope='layer0')
    layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop,
                               scope='layer1')
    layer1 = resnet_Add(x1=layer0, x2=layer1)
    down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1')

    # Stage 2 (32 channels)
    layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_1')
    layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop,
                               scope='layer2_2')
    layer2 = resnet_Add(x1=down1, x2=layer2)
    down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2')

    # Stage 3 (64 channels)
    layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_1')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_2')
    layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop,
                               scope='layer3_3')
    layer3 = resnet_Add(x1=down2, x2=layer3)
    down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3')

    # Stage 4 (128 channels)
    layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_1')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_2')
    layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop,
                               scope='layer4_3')
    layer4 = resnet_Add(x1=down3, x2=layer4)
    down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4')

    # Bottleneck (256 channels)
    layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_1')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_2')
    layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop,
                               scope='layer5_3')
    layer5 = resnet_Add(x1=down4, x2=layer5)

    # ---- Decoder: deconv, skip-concat with the matching encoder stage, convs, residual add ----
    # Up 1: 256 -> 128, skip from layer4
    deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1')
    layer6 = crop_and_concat(layer4, deconv1)
    # Static spatial dims of the skip tensor, forwarded so the conv helpers know the shape.
    _, Z, H, W, _ = layer4.get_shape().as_list()
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_1')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_2')
    layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer6_3')
    layer6 = resnet_Add(x1=deconv1, x2=layer6)

    # Up 2: 128 -> 64, skip from layer3
    deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2')
    layer7 = crop_and_concat(layer3, deconv2)
    _, Z, H, W, _ = layer3.get_shape().as_list()
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_1')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_2')
    layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer7_3')
    layer7 = resnet_Add(x1=deconv2, x2=layer7)

    # Up 3: 64 -> 32, skip from layer2
    deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3')
    layer8 = crop_and_concat(layer2, deconv3)
    _, Z, H, W, _ = layer2.get_shape().as_list()
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_1')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_2')
    layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer8_3')
    layer8 = resnet_Add(x1=deconv3, x2=layer8)

    # Up 4: 32 -> 16, skip from layer1
    deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4')
    layer9 = crop_and_concat(layer1, deconv4)
    _, Z, H, W, _ = layer1.get_shape().as_list()
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_1')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_2')
    layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase,
                               drop=drop, scope='layer9_3')
    layer9 = resnet_Add(x1=deconv4, x2=layer9)

    # Output head: 1x1x1 conv + sigmoid to n_class channels.
    output_map = conv_sigmod(x=layer9, kernal=(1, 1, 1, 32, n_class), scope='output')

    # BUGFIX: the original created an InteractiveSession here and called
    # output_map.eval(), which (a) fails because no variables are initialized at
    # graph-construction time and (b) leaks a session. Evaluate in the caller instead.
    return output_map

我使用了两种方法来获取张量的值。

  1. tensor.eval()
  2. Session.run(张量)

但这两种方法都报了同样的错误（见下方错误截图），希望有人能帮我解决。

python tensorflow eval tensor cnn
1个回答
0
投票

你可以直接对张量执行 sess.run 来获取值。首先你需要拿到那个张量。你可以在 build_model 中通过 name 参数给它命名（任何张量都可以命名），比如:

# Example: pass `name=` so the tensor gets a stable graph name and can be
# retrieved later via get_tensor_by_name. (Flat, W1, b1 come from the model code.)
Layer_name = tf.add(tf.multiply(Flat, W1), b1, name="Layer_name")

稍后,你可以得到该层的张量并对其进行评估。

# Example: fetch the named tensor from the default graph and evaluate it.
# "Layer_name:0" means output 0 of the op named "Layer_name".
# (x and input_img_arr are the model's placeholder and input batch, defined elsewhere.)
with tf.Session() as sess:
    Layer_name = tf.get_default_graph().get_tensor_by_name('Layer_name:0')
    FC1_values = sess.run(Layer_name, feed_dict={x: input_img_arr})
© www.soinside.com 2019 - 2024. All rights reserved.