Python: Tensor's data type is not supported in function 'blobFromTensor'

Problem description

I am trying to train my own project with Keras. But when I use cv2.dnn.readNetFromTensorflow or cv2.dnn.readNet, I get this error: Tensor's data type is not supported in function 'blobFromTensor'.

I already call K.set_learning_phase(0) before building the model.
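For reference, a minimal sketch of that setup. Note that in the build() code below, K is the keras module itself, so the backend call goes through K.backend; the input sizes here are hypothetical:

import keras as K

# Put Keras in inference mode BEFORE any layer is created, so that
# BatchNormalization/Dropout do not add training-only nodes to the graph.
K.backend.set_learning_phase(0)

model = build(width=28, height=28, depth=3, classes=10)  # hypothetical sizes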

My model-building code is:

def build(width, height, depth, classes):

    model = K.models.Sequential()
    inputShape = (height, width, depth)
    print("input shape for the hidden layers: {}".format(inputShape))

    # NOTE: if set_learning_phase(0) is removed, freezing fails with:
    # raise ValueError("Tensor name '{0}' is invalid.".format(node.input[0]))
    # K.backend.set_learning_phase(0)

    # first hidden layer
    model.add(K.layers.Dense(16, input_shape=inputShape))
    model.add(K.layers.BatchNormalization())

    # first set of conv => relu => pool layers
    model.add(K.layers.Conv2D(32, (7, 7), padding="same", activation="relu"))
    model.add(K.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.25))

    # dense layer for image resolution
    model.add(K.layers.Dense(64, activation="relu"))

    # second set of conv => relu => pool layers
    model.add(K.layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
    model.add(K.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.25))

    # third set of conv => relu => pool layers
    model.add(K.layers.Conv2D(128, (3, 3), activation="relu", padding="same"))
    model.add(K.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.25))

    # model.output_shape is (None, h, w, filters), e.g. (None, 7, 7, 128)
    a, b, c, d = model.output_shape
    print("model output_shape:", a, b, c, d)
    a = b * c * d
    print("a ->", a)

    model.add(K.layers.Permute([1, 2, 3]))  # identity permute
    model.add(K.layers.Reshape((a,)))

    # first set of fc => relu layers
    model.add(K.layers.Dense(units=512, activation="relu"))
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.5))

    # model.add(K.layers.Flatten())
    # softmax classifier
    model.add(K.layers.Dense(units=classes, activation="softmax"))

    return model

So I train with the fit_generator method; a minimal sketch of such a call is shown below, followed by the code I then apply to save and freeze the model.
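The generator, directory layout, and hyper-parameters below are hypothetical (the question does not show them); this is just what a typical Keras 2.x fit_generator call looks like:

import keras as K

# Hypothetical data pipeline; the question does not show the real one.
datagen = K.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255,
                                                   validation_split=0.2)
train_gen = datagen.flow_from_directory("data/", target_size=(28, 28),
                                        batch_size=32, subset="training")
val_gen = datagen.flow_from_directory("data/", target_size=(28, 28),
                                      batch_size=32, subset="validation")

model = build(width=28, height=28, depth=3, classes=train_gen.num_classes)
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])

# fit_generator is the Keras 2.x entry point for training from generators
history = model.fit_generator(train_gen,
                              steps_per_epoch=len(train_gen),
                              validation_data=val_gen,
                              validation_steps=len(val_gen),
                              epochs=10)

After training, the save/freeze step is: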

# save the model to an .h5 file
model.save(h5_file)

# freeze the graph to .pb (and later .pbtxt); this step can take a long time
frozen_graph = freeze_session(sess, output_names=[out.op.name for out in model.outputs])

# write the frozen graph to a .pb file for readNetFromTensorflow
try:
    tf.io.write_graph(frozen_graph, learn_path, model_file, as_text=False)
    log_file.register(0, "graph file saving to pb is success")
except ValueError as er:
    os.system('clear')
    console_log.d("ERROR -> NOT CONVERT TO PB FILE ", er)
    log_file.register(1, " ERROR that convert to pbfile : {} ".format(er))

console_log.d("FROZEN GRAPH TO PB FILE ")

# convert the .pb file to a .pbtxt file
create_pbtxt(model_file, pbtxt_file, learn_path)
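For context, freeze_session is not defined in the question. A common TF 1.x helper of that name (a sketch under that assumption, not necessarily the asker's exact version) folds all variables into constants:

import tensorflow as tf

def freeze_session(session, output_names=None):
    # Replace every variable reachable from the outputs with a Const node,
    # so the whole graph can be serialized into a single .pb file.
    graph = session.graph
    with graph.as_default():
        input_graph_def = graph.as_graph_def()
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names or [])
    return frozen_graph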

Finally, I convert the .pb file, with some nodes removed, to a .pbtxt file:

import os

import numpy as np
import tensorflow as tf
from google.protobuf import message, text_format

# console_log and log_file are project-specific logging helpers


def create_pbtxt(pb_path, pbtxt_path, path):

    os.system("clear")  # clear the console

    console_log.d("CREATE PBTXT FILE")
    console_log.d("pb file name ---> ", pb_path)

    # build the full path to the saved .pb file
    found_path_file = path + "/" + pb_path
    console_log.d("original path it was saved to:", found_path_file)

    with tf.Session() as sess:
        with tf.gfile.GFile(found_path_file, 'rb') as f:
            graph_def = tf.GraphDef()
            try:
                # text_format.Merge(f.read(), graph_def)  # for text input
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
                console_log.d("graph parsed; pbtxt file will be saved")
            except message.DecodeError:
                console_log.d("error while parsing the graph file")

            # inp_node = 'MobilenetV2/MobilenetV2/input'
            # oup_node = 'logits/semantic/BiasAdd'
            # graph_def = optimize_for_inference_lib.optimize_for_inference(...)

            name_array = []
            op_array = []

            for i in reversed(range(len(graph_def.node))):
                log_file.register(0, " node op (reversed): {} ".format(graph_def.node[i].op))

                # the cv2 test file returns an addConstNodes error, probably
                # because Const nodes still exist, so delete them
                if graph_def.node[i].op == 'Const':
                    console_log.d("found const variable!!!")
                    os.system('clear')  # clear the console
                    del graph_def.node[i]
                    continue  # node i is gone; move to the next index

                # strip attributes that the cv2 importer cannot handle
                for attr in ['T', 'data_format', 'Tshape', 'N', 'Tidx', 'Tdim',
                             'use_cudnn_on_gpu', 'Index', 'Tperm', 'is_training',
                             'Tpaddings']:
                    if attr in graph_def.node[i].attr:
                        console_log.d(" THERE IS THE ATTR ")
                        del graph_def.node[i].attr[attr]

                # collect op and name of each remaining node for the log file
                op = graph_def.node[i].op
                op_array.append(op)
                print("op: {}".format(op))
                name = graph_def.node[i].name
                name_array.append(name)
                print("name: {}".format(name))

            # convert the collected values to np arrays and log them
            op2_array = np.array(op_array)
            name2_array = np.array(name_array)
            log_file.register(0, " op : {} , name: {} ".format(op2_array, name2_array))

    # save the modified graph as a .pbtxt file
    tf.io.write_graph(graph_def, path, pbtxt_path, as_text=True)

The problem comes from testing.py. When the debugger reaches cv2.dnn.readNetFromTensorflow or cv2.dnn.readNet, the program fails with "Tensor's data type is not supported in function 'blobFromTensor'".

Here is testing.py:

# load the network, with or without the processed .pbtxt file
if flags.text_file is None:
    net = cv2.dnn.readNet(pb_file)
else:
    txt_file = os.getcwd() + "/" + flags.text_file
    print(txt_file)
    net = cv2.dnn.readNetFromTensorflow(pb_file, txt_file)

Why does this error occur? I don't know. Please help me.

python tensorflow keras cv2
1 Answer

I found the solution on this site: fossies.org

I added the removeUnusedNodesAndAttrs function, together with a to_remove callback, to my code on GitHub.

Because cv2 does not support some layer nodes (I think this is the important point), you can simply remove them.
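For reference, the helper in question lives in OpenCV's samples/dnn/tf_text_graph_common.py (which is what fossies.org mirrors); this is paraphrased from memory, so treat it as a sketch rather than the exact source:

def removeUnusedNodesAndAttrs(to_remove, graph_def):
    # Attributes that OpenCV's TensorFlow importer does not understand.
    unused_attrs = ['T', 'Tshape', 'N', 'Tidx', 'Tdim', 'use_cudnn_on_gpu',
                    'Index', 'Tperm', 'is_training', 'Tpaddings']

    removed_nodes = []

    for i in reversed(range(len(graph_def.node))):
        op = graph_def.node[i].op
        name = graph_def.node[i].name

        if to_remove(name, op):
            # Remember non-Const nodes so references to them can be dropped.
            if op != 'Const':
                removed_nodes.append(name)
            del graph_def.node[i]
        else:
            for attr in unused_attrs:
                if attr in graph_def.node[i].attr:
                    del graph_def.node[i].attr[attr]

    # Drop inputs that referenced the removed nodes.
    for node in graph_def.node:
        for i in reversed(range(len(node.input))):
            if node.input[i] in removed_nodes:
                del node.input[i]

Here to_remove is a predicate over (name, op); for example, lambda name, op: op == 'Const' drops the weight tensors, which the .pbtxt text graph should not contain because cv2.dnn reads the weights from the .pb file instead.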
