IndexError: list index out of range while keras model.predict()

Question:

If I try to predict with my model, which has 3 inputs, using this code:

predicted_seaLevel = []

prediction = model2.predict(
    [[np.array(X[0][0])],[np.array(X[1][0])],[np.array(X[2][0])]],
    batch_size=None,
    verbose='auto',
    steps=None,
    callbacks=None,
    max_queue_size=10,
    workers=1,
    use_multiprocessing=False
    )
predicted_seaLevel.append(prediction)

I get this error:

IndexError                                Traceback (most recent call last)
Input In [119], in <cell line: 3>()
      1 predicted_seaLevel = []
----> 3 prediction = model2.predict(
      4     [[np.array(X[0][0])],[np.array(X[1][0])],[np.array(X[2][0])]],
      5     batch_size=None,
      6     verbose='auto',
      7     steps=None,
      8     callbacks=None,
      9     max_queue_size=10,
     10     workers=1,
     11     use_multiprocessing=False
     12     )
     13 predicted_seaLevel.append(prediction)

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\training.py:1720, in Model.predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
   1714   except ValueError:
   1715     warnings.warn('Using Model.predict with '
   1716                   'MultiWorkerDistributionStrategy or TPUStrategy and '
   1717                   'AutoShardPolicy.FILE might lead to out-of-order result'
   1718                   '. Consider setting it to AutoShardPolicy.DATA.')
-> 1720 data_handler = data_adapter.get_data_handler(
   1721     x=x,
   1722     batch_size=batch_size,
   1723     steps_per_epoch=steps,
   1724     initial_epoch=0,
   1725     epochs=1,
   1726     max_queue_size=max_queue_size,
   1727     workers=workers,
   1728     use_multiprocessing=use_multiprocessing,
   1729     model=self,
   1730     steps_per_execution=self._steps_per_execution)
   1732 # Container that configures and calls `tf.keras.Callback`s.
   1733 if not isinstance(callbacks, callbacks_module.CallbackList):

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\data_adapter.py:1383, in get_data_handler(*args, **kwargs)
   1381 if getattr(kwargs["model"], "_cluster_coordinator", None):
   1382   return _ClusterCoordinatorDataHandler(*args, **kwargs)
-> 1383 return DataHandler(*args, **kwargs)

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\data_adapter.py:1138, in DataHandler.__init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution, distribute)
   1135   self._steps_per_execution_value = steps_per_execution.numpy().item()
   1137 adapter_cls = select_data_adapter(x, y)
-> 1138 self._adapter = adapter_cls(
   1139     x,
   1140     y,
   1141     batch_size=batch_size,
   1142     steps=steps_per_epoch,
   1143     epochs=epochs - initial_epoch,
   1144     sample_weights=sample_weight,
   1145     shuffle=shuffle,
   1146     max_queue_size=max_queue_size,
   1147     workers=workers,
   1148     use_multiprocessing=use_multiprocessing,
   1149     distribution_strategy=tf.distribute.get_strategy(),
   1150     model=model)
   1152 strategy = tf.distribute.get_strategy()
   1154 self._current_step = 0

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\data_adapter.py:240, in TensorLikeDataAdapter.__init__(self, x, y, sample_weights, sample_weight_modes, batch_size, epochs, steps, shuffle, **kwargs)
    235 (sample_weights, _, _) = training_utils.handle_partial_sample_weights(
    236     y, sample_weights, sample_weight_modes, check_all_flat=True)
    238 inputs = pack_x_y_sample_weight(x, y, sample_weights)
--> 240 num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(inputs)).pop()
    241 _check_data_cardinality(inputs)
    243 # If batch_size is not passed but steps is, calculate from the input data.
    244 # Default to 32 for backwards compat.

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\data_adapter.py:240, in <genexpr>(.0)
    235 (sample_weights, _, _) = training_utils.handle_partial_sample_weights(
    236     y, sample_weights, sample_weight_modes, check_all_flat=True)
    238 inputs = pack_x_y_sample_weight(x, y, sample_weights)
--> 240 num_samples = set(int(i.shape[0]) for i in tf.nest.flatten(inputs)).pop()
    241 _check_data_cardinality(inputs)
    243 # If batch_size is not passed but steps is, calculate from the input data.
    244 # Default to 32 for backwards compat.

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\framework\tensor_shape.py:896, in TensorShape.__getitem__(self, key)
    894 else:
    895   if self._v2_behavior:
--> 896     return self._dims[key].value
    897   else:
    898     return self._dims[key]

IndexError: list index out of range

This is my model:

def simpleFunctional():
    
    input1 = keras.layers.Input(shape=(1,))
    input2 = keras.layers.Input(shape=(1,))
    input3 = keras.layers.Input(shape=(1,))
    merged = keras.layers.Concatenate(axis=1)([input1, input2, input3])
    dense1 = keras.layers.Dense(16, input_dim=2, activation=keras.activations.relu, use_bias=True)(merged)
    dense2 = keras.layers.Dense(32, input_dim=2, activation=keras.activations.relu, use_bias=True)(dense1)
    dense3 = keras.layers.Dense(64, input_dim=2, activation=keras.activations.relu, use_bias=True)(dense2)
    dense4 = keras.layers.Dense(32, input_dim=2, activation=keras.activations.relu, use_bias=True)(dense3)
    dense5 = keras.layers.Dense(32, input_dim=2, activation=keras.activations.relu, use_bias=True)(dense4)
    output = keras.layers.Dense(1, activation=keras.activations.relu, use_bias=True)(dense5)
    model = keras.models.Model(inputs=[input1, input2, input3], outputs=output)
    
    
    #opt = SGD(learning_rate=0.001)
    model.compile(
        loss='binary_crossentropy',
        optimizer='adam',
        metrics=["accuracy"],)

    history = model.fit(X_train, Y_train, batch_size=45, epochs=900)

    test_scores = model.evaluate(X_test, Y_test, verbose=2)
    print("Test loss:", test_scores[0])
    print("Test accuracy:", test_scores[1])
    model.save("../AI_Lilly")
    return model

But this works just fine:

predicted_seaLevel = []

prediction = model2.predict(
    [[np.array([0.21468531468531468])],[np.array([0.4228191581323462])],[np.array([0.17076839761718698])]],
    batch_size=None,
    verbose='auto',
    steps=None,
    callbacks=None,
    max_queue_size=10,
    workers=1,
    use_multiprocessing=False
    )
predicted_seaLevel.append(prediction)

While:

[[np.array(X[0][0])],[np.array(X[1][0])],[np.array(X[2][0])]] == [[np.array([0.21468531468531468])],[np.array([0.4228191581323462])],[np.array([0.17076839761718698])]]

is true.

I also tried:

X_0 = []
X_1 = []
X_2 = []

for i in range(54):
    X_0.append(np.array(X[0][i]))
    X_1.append(np.array(X[1][i]))
    X_2.append(np.array(X[2][i]))

predicted_seaLevel = []

prediction = model2.predict(
    [[X_0[0]],[X_1[0]],[X_2[0]]],
    batch_size=None,
    verbose='auto',
    steps=None,
    callbacks=None,
    max_queue_size=10,
    workers=1,
    use_multiprocessing=False
    )
predicted_seaLevel.append(prediction)

But it gives me the same error.

I also tried:

predicted_seaLevel = []

prediction = model2.predict(
    [X[0][0],X[1][0],X[2][0]],
    batch_size=None,
    verbose='auto',
    steps=None,
    callbacks=None,
    max_queue_size=10,
    workers=1,
    use_multiprocessing=False
    )
predicted_seaLevel.append(prediction)

But it gives me this error:

Input In [146], in <cell line: 3>()
      1 predicted_seaLevel = []
----> 3 prediction = model2.predict(
      4     [X[0][0],X[1][0],X[2][0]],
      5     batch_size=None,
      6     verbose='auto',
      7     steps=None,
      8     callbacks=None,
      9     max_queue_size=10,
     10     workers=1,
     11     use_multiprocessing=False
     12     )
     13 predicted_seaLevel.append(prediction)

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\training.py:1751, in Model.predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
   1749 for step in data_handler.steps():
   1750   callbacks.on_predict_batch_begin(step)
-> 1751   tmp_batch_outputs = self.predict_function(iterator)
   1752   if data_handler.should_sync:
   1753     context.async_wait()

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\eager\def_function.py:885, in Function.__call__(self, *args, **kwds)
    882 compiler = "xla" if self._jit_compile else "nonXla"
    884 with OptionalXlaContext(self._jit_compile):
--> 885   result = self._call(*args, **kwds)
    887 new_tracing_count = self.experimental_get_tracing_count()
    888 without_tracing = (tracing_count == new_tracing_count)

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\eager\def_function.py:924, in Function._call(self, *args, **kwds)
    921 self._lock.release()
    922 # In this case we have not created variables on the first call. So we can
    923 # run the first trace but we should fail if variables are created.
--> 924 results = self._stateful_fn(*args, **kwds)
    925 if self._created_variables and not ALLOW_DYNAMIC_VARIABLE_CREATION:
    926   raise ValueError("Creating variables on a non-first call to a function"
    927                    " decorated with tf.function.")

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\eager\function.py:3038, in Function.__call__(self, *args, **kwargs)
   3035 """Calls a graph function specialized to the inputs."""
   3036 with self._lock:
   3037   (graph_function,
-> 3038    filtered_flat_args) = self._maybe_define_function(args, kwargs)
   3039 return graph_function._call_flat(
   3040     filtered_flat_args, captured_inputs=graph_function.captured_inputs)

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\eager\function.py:3459, in Function._maybe_define_function(self, args, kwargs)
   3449 with ag_ctx.ControlStatusCtx(
   3450     status=ag_status, options=self._autograph_options):
   3451 
   (...)
   3454   # and 2. there's no provided input signature
   3455   # and 3. there's been a cache miss for this calling context
   3456   if (self._experimental_relax_shapes and
   3457       self.input_signature is None and
   3458       call_context_key in self._function_cache.missed):
-> 3459     return self._define_function_with_shape_relaxation(
   3460         args, kwargs, flat_args, filtered_flat_args, cache_key_context)
   3462   self._function_cache.missed.add(call_context_key)
   3463   graph_function = self._create_graph_function(args, kwargs)

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\eager\function.py:3381, in Function._define_function_with_shape_relaxation(self, args, kwargs, flat_args, filtered_flat_args, cache_key_context)
   3374   (relaxed_arg_specs, relaxed_kwarg_specs) = nest.pack_sequence_as(
   3375       (args, kwargs), relaxed_arg_specs, expand_composites=False)
   3376   (args, kwargs) = nest.pack_sequence_as(
   3377       (relaxed_arg_specs, relaxed_kwarg_specs),
   3378       flat_args,
   3379       expand_composites=True)
-> 3381 graph_function = self._create_graph_function(
   3382     args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes)
   3383 self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function
   3385 return (graph_function, [
   3386     t for t in nest.flatten((args, kwargs), expand_composites=True)
   3387     if isinstance(t, (ops.Tensor,
   3388                       resource_variable_ops.BaseResourceVariable))
   3389 ])

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\eager\function.py:3298, in Function._create_graph_function(self, args, kwargs, override_flat_arg_shapes)
   3293 missing_arg_names = [
   3294     "%s_%d" % (arg, i) for i, arg in enumerate(missing_arg_names)
   3295 ]
   3296 arg_names = base_arg_names + missing_arg_names
   3297 graph_function = ConcreteFunction(
-> 3298     func_graph_module.func_graph_from_py_func(
   3299         self._name,
   3300         self._python_function,
   3301         args,
   3302         kwargs,
   3303         self.input_signature,
   3304         autograph=self._autograph,
   3305         autograph_options=self._autograph_options,
   3306         arg_names=arg_names,
   3307         override_flat_arg_shapes=override_flat_arg_shapes,
   3308         capture_by_value=self._capture_by_value),
   3309     self._function_attributes,
   3310     function_spec=self.function_spec,
   3311     # Tell the ConcreteFunction to clean up its graph once it goes out of
   3312     # scope. This is not the default behavior since it gets used in some
   3313     # places (like Keras) where the FuncGraph lives longer than the
   3314     # ConcreteFunction.
   3315     shared_func_graph=False)
   3316 return graph_function

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\framework\func_graph.py:1007, in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes, acd_record_initial_resource_uses)
   1004 else:
   1005   _, original_func = tf_decorator.unwrap(python_func)
-> 1007 func_outputs = python_func(*func_args, **func_kwargs)
   1009 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
   1010 # TensorArrays and `None`s.
   1011 func_outputs = nest.map_structure(convert, func_outputs,
   1012                                   expand_composites=True)

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\eager\def_function.py:668, in Function._defun_with_scope.<locals>.wrapped_fn(*args, **kwds)
    664 with default_graph._variable_creator_scope(scope, priority=50):  # pylint: disable=protected-access
    665   # __wrapped__ allows AutoGraph to swap in a converted function. We give
    666   # the function a weak reference to itself to avoid a reference cycle.
    667   with OptionalXlaContext(compile_with_xla):
--> 668     out = weak_wrapped_fn().__wrapped__(*args, **kwds)
    669   return out

File ~\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\framework\func_graph.py:994, in func_graph_from_py_func.<locals>.wrapper(*args, **kwargs)
    992 except Exception as e:  # pylint_disable=broad-except
    993   if hasattr(e, "ag_error_metadata"):
--> 994     raise e.ag_error_metadata.to_exception(e)
    995   else:
    996     raise

ValueError: in user code:

    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\training.py:1586 predict_function  *
        return step_function(self, iterator)
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\training.py:1576 step_function  **
        outputs = model.distribute_strategy.run(run_step, args=(data,))
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1286 run
        return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2849 call_for_each_replica
        return self._call_for_each_replica(fn, args, kwargs)
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:3632 _call_for_each_replica
        return fn(*args, **kwargs)
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\training.py:1569 run_step  **
        outputs = model.predict_step(data)
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\training.py:1537 predict_step
        return self(x, training=False)
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\base_layer.py:1020 __call__
        input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
    C:\Users\FutureSpace\.conda\envs\handwritingNumbersAI\lib\site-packages\keras\engine\input_spec.py:199 assert_input_compatibility
        raise ValueError('Layer ' + layer_name + ' expects ' +

    ValueError: Layer model_18 expects 3 input(s), but it received 1 input tensors. Inputs received: [<tf.Tensor 'ExpandDims:0' shape=(None, 1) dtype=float32>]

I really have no idea how to solve this, and I can't explain to myself why one thing works while another thing that is equal to the first doesn't.

Please help!

Asked By: lilly schwarz

||

Answers:

You need to provide 3 inputs to your model, so the argument you pass to predict has to be a list of 3 arrays with shapes like:

[(None, 1), (None, 1), (None, 1)]
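
As a side note, and this is an inference rather than something stated in the post: the IndexError most likely occurs because X[0][0] is a plain scalar, so np.array(X[0][0]) is a 0-d array with shape (). Keras then cannot read a batch dimension from shape[0], which is exactly the self._dims[key] lookup that fails at the bottom of the first traceback. The == check in the question is misleading, because comparing a 0-d array with a length-1 array still evaluates to True:

import numpy as np

a = np.array(0.21468531468531468)    # shape () -- no axis Keras could read as the batch size
b = np.array([0.21468531468531468])  # shape (1,) -- what the working call passes

print(a.shape, b.shape)  # () (1,)
print([[a]] == [[b]])    # True -- a == b broadcasts to array([True]), so the lists compare equal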

To solve your problem, just use this:

model.predict(
    [
        np.array([[X[0][0]],]), # inputs of input1
        np.array([[X[1][0]],]), # inputs of input2
        np.array([[X[2][0]],]), # inputs of input3
    ],
    ...
)

Answered By: Alberto Sinigaglia
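
For completeness, here is a minimal end-to-end sketch of the fix. It assumes X is laid out as in the question's loop, i.e. X[i][j] is the j-th scalar sample for input i, and that model2 is the 3-input functional model from the question; adjust the indexing if your data is shaped differently.

import numpy as np

predicted_seaLevel = []

# One (1, 1) array per model input: a batch of 1 sample with 1 feature each.
x1 = np.array([[X[0][0]]])
x2 = np.array([[X[1][0]]])
x3 = np.array([[X[2][0]]])

prediction = model2.predict([x1, x2, x3])  # returns an array of shape (1, 1)
predicted_seaLevel.append(prediction)

# Instead of predicting sample by sample, all 54 samples can be run in one call
# by reshaping each input series into a (54, 1) array:
all_predictions = model2.predict([
    np.array(X[0]).reshape(-1, 1),
    np.array(X[1]).reshape(-1, 1),
    np.array(X[2]).reshape(-1, 1),
])

Passing a list of three 2-d arrays matches the [(None, 1), (None, 1), (None, 1)] structure described in the answer above.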