ValueError.Tensor("ExponentialDecay_4:0", shape=(), dtype=float32) Tensor("ExponentialDecay_4:0", shape=(), dtype=float32)

Problem description (votes: 1, answers: 1)

A classifier to detect the language of a text: Hindi or English.

ValueError: Tensor("ExponentialDecay_4:0", shape=(), dtype=float32) must be from the same graph as Tensor(("dnn/hiddenlayer_0/kernel/part_0:0", shape=(), dtype=resource)).
from __future__ import absolute_import, division, print_function, unicode_literals
from absl import logging

import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import re
import seaborn as sns


df = pd.read_csv("intern_test/data/english_text.csv",  encoding="latin-1")
df2 = pd.read_csv("intern_test/data/hinglish_text.csv",  encoding="latin-1")

df['label'] = 0 # English
df2['label'] = 1 # Hindi

df3 = pd.concat([df,df2])

df3.head()
# Training input on the whole training set with no limit on training epochs.
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    df3, df3["label"], num_epochs=None, shuffle=True)

# Prediction on the whole training set.
predict_train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
    df3, df3["label"], shuffle=False)
# Prediction on the test set.
#predict_test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
#    X_test, y_test, shuffle=False)

embedded_text_feature_column = hub.text_embedding_column(
    key="text", 
    module_spec="https://tfhub.dev/google/nnlm-en-dim128/1")

optimizer = tf.compat.v1.train.AdamOptimizer(
    learning_rate=tf.compat.v1.train.exponential_decay(
        global_step=0,
        learning_rate=0.1,
        decay_steps=10000,
        decay_rate=0.96, staircase=True))

estimator = tf.estimator.DNNClassifier(
    hidden_units=[500, 100],
    feature_columns=[embedded_text_feature_column],
    n_classes=2,
    optimizer=optimizer
)

estimator.train(input_fn=train_input_fn, steps=5000);

Full traceback:

ValueError                                Traceback (most recent call last)
<ipython-input-47-1c4563a14246> in <module>
      2 # batch size. This is roughly equivalent to 25 epochs since the training dataset
      3 # contains 25,000 examples.
----> 4 estimator.train(input_fn=train_input_fn, steps=5000);

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
    356 
    357       saving_listeners = _check_listeners_type(saving_listeners)
--> 358       loss = self._train_model(input_fn, hooks, saving_listeners)
    359       logging.info('Loss for final step: %s.', loss)
    360       return self

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
   1122       return self._train_model_distributed(input_fn, hooks, saving_listeners)
   1123     else:
-> 1124       return self._train_model_default(input_fn, hooks, saving_listeners)
   1125 
   1126   def _train_model_default(self, input_fn, hooks, saving_listeners):

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners)
   1152       worker_hooks.extend(input_hooks)
   1153       estimator_spec = self._call_model_fn(
-> 1154           features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
   1155       global_step_tensor = training_util.get_global_step(g)
   1156       return self._train_with_estimator_spec(estimator_spec, worker_hooks,

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/estimator.py in _call_model_fn(self, features, labels, mode, config)
   1110 
   1111     logging.info('Calling model_fn.')
-> 1112     model_fn_results = self._model_fn(features=features, **kwargs)
   1113     logging.info('Done calling model_fn.')
   1114 

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/canned/dnn.py in _model_fn(features, labels, mode, config)
    520           input_layer_partitioner=input_layer_partitioner,
    521           config=config,
--> 522           batch_norm=batch_norm)
    523 
    524     super(DNNClassifier, self).__init__(

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/canned/dnn.py in _dnn_model_fn(features, labels, mode, head, hidden_units, feature_columns, optimizer, activation_fn, dropout, input_layer_partitioner, config, use_tpu, batch_norm)
    300           labels=labels,
    301           optimizer=optimizer,
--> 302           logits=logits)
    303 
    304 

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/canned/head.py in create_estimator_spec(self, features, mode, logits, labels, optimizer, train_op_fn, regularization_losses)
    238           self._create_tpu_estimator_spec(
    239               features, mode, logits, labels, optimizer, train_op_fn,
--> 240               regularization_losses))
    241       return tpu_estimator_spec.as_estimator_spec()
    242     except NotImplementedError:

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow_estimator/python/estimator/canned/head.py in _create_tpu_estimator_spec(self, features, mode, logits, labels, optimizer, train_op_fn, regularization_losses)
   1244         train_op = optimizer.minimize(
   1245             regularized_training_loss,
-> 1246             global_step=training_util.get_global_step())
   1247       elif train_op_fn is not None:
   1248         train_op = train_op_fn(regularized_training_loss)

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/training/optimizer.py in minimize(self, loss, global_step, var_list, gate_gradients, aggregation_method, colocate_gradients_with_ops, name, grad_loss)
    411 
    412     return self.apply_gradients(grads_and_vars, global_step=global_step,
--> 413                                 name=name)
    414 
    415   def compute_gradients(self, loss, var_list=None,

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/training/optimizer.py in apply_gradients(self, grads_and_vars, global_step, name)
    610           scope_name = var.op.name
    611         with ops.name_scope("update_" + scope_name), ops.colocate_with(var):
--> 612           update_ops.append(processor.update_op(self, grad))
    613       if global_step is None:
    614         apply_updates = self._finish(update_ops, name)

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/training/optimizer.py in update_op(self, optimizer, g)
    169       return optimizer._resource_apply_sparse_duplicate_indices(
    170           g.values, self._v, g.indices)
--> 171     update_op = optimizer._resource_apply_dense(g, self._v)
    172     if self._v.constraint is not None:
    173       with ops.control_dependencies([update_op]):

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/training/adam.py in _resource_apply_dense(self, grad, var)
    173         math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
    174         math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
--> 175         grad, use_locking=self._use_locking)
    176 
    177   def _apply_sparse_shared(self, grad, var, indices, scatter_add):

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/training/gen_training_ops.py in resource_apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, use_locking, use_nesterov, name)
   1300                              beta2=beta2, epsilon=epsilon, grad=grad,
   1301                              use_locking=use_locking,
-> 1302                              use_nesterov=use_nesterov, name=name)
   1303   return _op
   1304   _result = None

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
    348       # Need to flatten all the arguments into a list.
    349       # pylint: disable=protected-access
--> 350       g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
    351       # pylint: enable=protected-access
    352     except AssertionError as e:

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in _get_graph_from_inputs(op_input_list, graph)
   5711         graph = graph_element.graph
   5712       elif original_graph_element is not None:
-> 5713         _assert_same_graph(original_graph_element, graph_element)
   5714       elif graph_element.graph is not graph:
   5715         raise ValueError("%s is not from the passed-in graph." % graph_element)

~/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in _assert_same_graph(original_item, item)
   5647   if original_item.graph is not item.graph:
   5648     raise ValueError("%s must be from the same graph as %s." % (item,
-> 5649                                                                 original_item))
   5650 
   5651 
ValueError: Tensor("ExponentialDecay_4:0", shape=(), dtype=float32) must be from the same graph as Tensor("dnn/hiddenlayer_0/kernel/part_0:0", shape=(), dtype=resource).
python tensorflow machine-learning deep-learning nlp
1 Answer

0 votes

You can try expressing the exponential decay as a function that takes the initial learning rate and the number of steps as input, and pass the resulting schedule function to keras.callbacks.LearningRateScheduler.

Here is an example using exponential decay:

from tensorflow import keras

# A fixed schedule: start at 0.01 and divide the learning rate by 10 every 20 epochs.
def exponential_decay_fn(epoch):
    return 0.01 * 0.1 ** (epoch / 20)

# The same schedule written as a factory, so lr0 and s can be passed in as parameters.
def exponential_decay(lr0, s):
    def exponential_decay_fn(epoch):
        return lr0 * 0.1 ** (epoch / s)
    return exponential_decay_fn

exponential_decay_fn = exponential_decay(lr0=0.01, s=20)

# model, X_train_scaled, y_train, X_valid_scaled and y_valid are assumed to be
# a Keras model and a pre-split dataset defined elsewhere.
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam",
              metrics=["accuracy"])
n_epochs = 25

lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid),
                    callbacks=[lr_scheduler])
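
The names model, X_train_scaled, y_train, X_valid_scaled and y_valid above are just the answer's placeholders, not part of the question. A minimal sketch of what such a model could look like for this text-classification task, assuming the TF2-compatible release of the embedding module (nnlm-en-dim128/2) and purely illustrative layer sizes:

import tensorflow as tf
import tensorflow_hub as hub
from tensorflow import keras

# Hypothetical Keras counterpart of the question's DNNClassifier: a TF Hub
# text-embedding layer followed by two hidden layers and a 2-class softmax
# output, so it works with the sparse_categorical_crossentropy loss used above.
model = keras.Sequential([
    hub.KerasLayer("https://tfhub.dev/google/nnlm-en-dim128/2",
                   input_shape=[], dtype=tf.string, trainable=False),
    keras.layers.Dense(500, activation="relu"),
    keras.layers.Dense(100, activation="relu"),
    keras.layers.Dense(2, activation="softmax"),
])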

I hope this answers your question.
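
As for the error itself: tf.compat.v1.train.exponential_decay() creates its tensor in the default graph at the moment the optimizer is built, while the Estimator builds the model in a fresh graph during train(), so the two end up in different graphs. If you want to keep DNNClassifier, a possible fix is to defer optimizer construction by passing a callable (the TF 1.x estimator docs list a callable as an accepted value for optimizer); this is only a sketch under that assumption:

def make_optimizer():
    # Runs inside the Estimator's own graph, so the decay tensor and the model
    # variables belong to the same graph. Also use the real global step tensor
    # instead of the constant 0 from the original code, so the schedule
    # actually advances during training.
    learning_rate = tf.compat.v1.train.exponential_decay(
        learning_rate=0.1,
        global_step=tf.compat.v1.train.get_or_create_global_step(),
        decay_steps=10000,
        decay_rate=0.96,
        staircase=True)
    return tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)

estimator = tf.estimator.DNNClassifier(
    hidden_units=[500, 100],
    feature_columns=[embedded_text_feature_column],
    n_classes=2,
    optimizer=make_optimizer)  # pass the function itself, not a call

estimator.train(input_fn=train_input_fn, steps=5000)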
