IndexError: tuple index out of range when using Datasets with tensorflow 2.1.

问题描述 投票:2回答:1

生成我自己的TFRecords,我似乎无法在我的模型中正确使用数据集。为了测试是否是我当前的文件或模型代码中的某些东西,我使用了MNIST的tfds,但出现了同样的错误。

这个错误是。IndexError: tuple index out of range完整的输出在下面 我是从jupyter笔记本上做的,如果它改变了什么。

      1/Unknown - 0s 47ms/step
--------------------------------------------------------------------------- IndexError                                Traceback (most recent call last) <ipython-input-302-f8e9089d7285> in <module>
----> 1 model.fit(dataset['train'].batch(4096))

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
    817         max_queue_size=max_queue_size,
    818         workers=workers,
--> 819         use_multiprocessing=use_multiprocessing)
    820 
    821   def evaluate(self,

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py in fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing,
**kwargs)
    340                 mode=ModeKeys.TRAIN,
    341                 training_context=training_context,
--> 342                 total_epochs=epochs)
    343             cbks.make_logs(model, epoch_logs, training_result, ModeKeys.TRAIN)
    344 

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2.py in run_one_epoch(model, iterator, execution_function, dataset_size, batch_size, strategy, steps_per_epoch, num_samples, mode, training_context, total_epochs)
    126         step=step, mode=mode, size=current_batch_size) as batch_logs:
    127       try:
--> 128         batch_outs = execution_function(iterator)
    129       except (StopIteration, errors.OutOfRangeError):
    130         # TODO(kaftan): File bug about tf function and errors.OutOfRangeError?

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in execution_function(input_fn)
     96     # `numpy` translates Tensors to values in Eager mode.
     97     return nest.map_structure(_non_none_constant_value,
---> 98                               distributed_function(input_fn))
     99 
    100   return execution_function

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in __call__(self, *args, **kwds)
    566         xla_context.Exit()
    567     else:
--> 568       result = self._call(*args, **kwds)
    569 
    570     if tracing_count == self._get_tracing_count():

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in _call(self, *args, **kwds)
    613       # This is the first call of __call__, so we have to initialize.
    614       initializers = []
--> 615       self._initialize(args, kwds, add_initializers_to=initializers)
    616     finally:
    617       # At this point we know that the initialization is complete (or less

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
    495     self._concrete_stateful_fn = (
    496         self._stateful_fn._get_concrete_function_internal_garbage_collected( 
# pylint: disable=protected-access
--> 497             *args, **kwds))
    498 
    499     def invalid_creator_scope(*unused_args, **unused_kwds):

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args,
**kwargs)    2387       args, kwargs = None, None    2388     with self._lock:
-> 2389       graph_function, _, _ = self._maybe_define_function(args, kwargs)    2390     return graph_function    2391 

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _maybe_define_function(self, args, kwargs)    2701     2702       self._function_cache.missed.add(call_context_key)
-> 2703       graph_function = self._create_graph_function(args, kwargs)    2704       self._function_cache.primary[cache_key] = graph_function    2705       return graph_function, args, kwargs

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)    2591             arg_names=arg_names,    2592             override_flat_arg_shapes=override_flat_arg_shapes,
-> 2593             capture_by_value=self._capture_by_value),    2594         self._function_attributes,    2595         # Tell the ConcreteFunction to clean up its graph once it goes out of

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
    976                                           converted_func)
    977 
--> 978       func_outputs = python_func(*func_args, **func_kwargs)
    979 
    980       # invariant: `func_outputs` contains only Tensors, CompositeTensors,

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/eager/def_function.py in wrapped_fn(*args, **kwds)
    437         # __wrapped__ allows AutoGraph to swap in a converted function. We give
    438         # the function a weak reference to itself to avoid a reference cycle.
--> 439         return weak_wrapped_fn().__wrapped__(*args, **kwds)
    440     weak_wrapped_fn = weakref.ref(wrapped_fn)
    441 

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in distributed_function(input_iterator)
     83     args = _prepare_feed_values(model, input_iterator, mode, strategy)
     84     outputs = strategy.experimental_run_v2(
---> 85         per_replica_function, args=args)
     86     # Out of PerReplica outputs reduce or pick values to return.
     87     all_outputs = dist_utils.unwrap_output_dict(

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/distribute/distribute_lib.py in experimental_run_v2(self, fn, args, kwargs)
    761       fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx(),
    762                                 convert_by_default=False)
--> 763       return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    764 
    765   def reduce(self, reduce_op, value, axis):

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/distribute/distribute_lib.py in call_for_each_replica(self, fn, args, kwargs)    1817       kwargs
= {}    1818     with self._container_strategy().scope():
-> 1819       return self._call_for_each_replica(fn, args, kwargs)    1820     1821   def _call_for_each_replica(self, fn, args, kwargs):

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/distribute/distribute_lib.py in _call_for_each_replica(self, fn, args, kwargs)    2162         self._container_strategy(),    2163         replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
-> 2164       return fn(*args, **kwargs)    2165     2166   def _reduce_to(self, reduce_op, value, destinations):

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/autograph/impl/api.py in wrapper(*args, **kwargs)
    290   def wrapper(*args, **kwargs):
    291     with ag_ctx.ControlStatusCtx(status=ag_ctx.Status.DISABLED):
--> 292       return func(*args, **kwargs)
    293 
    294   if inspect.isfunction(func) or inspect.ismethod(func):

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_v2_utils.py in train_on_batch(model, x, y, sample_weight, class_weight, reset_metrics, standalone)
    414   x, y, sample_weights = model._standardize_user_data(
    415       x, y, sample_weight=sample_weight, class_weight=class_weight,
--> 416       extract_tensors_from_dataset=True)
    417   batch_size = array_ops.shape(nest.flatten(x, expand_composites=True)[0])[0]
    418   # If `model._distribution_strategy` is True, then we are in a replica context

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)    2381         is_dataset=is_dataset,   2382         class_weight=class_weight,
-> 2383         batch_size=batch_size)    2384     2385   def _standardize_tensors(self, x, y, sample_weight, run_eagerly, dict_inputs,

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in _standardize_tensors(self, x, y, sample_weight, run_eagerly, dict_inputs, is_dataset, class_weight, batch_size)    2467           shapes=None,    2468           check_batch_axis=False,  # Don't enforce the batch size.
-> 2469           exception_prefix='target')    2470     2471       # Generate sample-wise weight values given the `sample_weight` and

~/miniconda3/envs/keras/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
    510                        'for each key in: ' + str(names))
    511   elif isinstance(data, (list, tuple)):
--> 512     if isinstance(data[0], (list, tuple)):
    513       data = [np.asarray(d) for d in data]
    514     elif len(names) == 1 and isinstance(data[0], (float, int)):

IndexError: tuple index out of range

最小的代码来重现这个问题。

from tensorflow.keras.layers import Dense, Embedding, Flatten, Lambda, Subtract, Input, Concatenate, Average, Reshape, GlobalAveragePooling1D, Dot, Dropout
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.utils import Sequence
from tensorflow.keras import initializers

import tensorflow_datasets as tfds
tfds.list_builders()
dataset, info = tfds.load("mnist", with_info=True)
inputs = Input((28, 28, 1), name="image")
First = Dense(128, activation="relu")
Second = Dropout(0.2)
Third = Dense(10, activation="softmax", name="label")

first = First(inputs)
second = Second(first)
third = Third(second)

model = Model(inputs=[inputs], outputs=[third])


model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(dataset['train'].batch(4096))

我想我一定是在文档中遗漏了什么 但我想不通 我已经敲了几个小时的代码了 模型从生成器中训练得很好,但随着数据集越来越大,我想转换一下。

python tensorflow keras tensorflow-datasets
1个回答
0
投票

添加 as_supervised=Truetfds.load() 就能解决这个问题。另一个问题是为什么会出现这个问题,可能是TF的一个BUG。

© www.soinside.com 2019 - 2024. All rights reserved.