Resolving: 'InvalidArgumentError: Number of channels must be 1, 3 or 4, was 0 [[{{node DecodeBmp_1}}]] [[{{node IteratorGetNext_11}}]]'

Question:

I am trying to train a neural network to find lines in images; the images are grey-scale and in BMP format. Once the training phase begins, the program stops and outputs the error message below.

This code is based entirely on 'Image Segmentation with tf.keras' by Raymond Yuan (https://ej.uz/hk9s), with changes to the input pipeline; the model itself is exactly the same. I have tried redefining the input shape, but every time I change something the error message changes, and it always occurs at the same place.

#%%
import numpy as np
import matplotlib as mpl

import tensorflow as tf
from tensorflow.python.keras import layers
from tensorflow.python.keras import losses
from tensorflow.python.keras import models 
#%%
#Defining paths to all the images

x_train_filenames = []
y_train_filenames = []

x_eval_filenames = []
y_eval_filenames = []

for x in range(250):
    x_train_filenames.append(r'Train/Images/gen_{}_.bmp'.format(x))
    y_train_filenames.append(r'Train/Labels/gen_{}_seg_.bmp'.format(x))

    x_eval_filenames.append(r'Evaluate/Images/gen_{}_.bmp'.format(x))
    y_eval_filenames.append(r'Evaluate/Labels/gen_{}_seg_.bmp'.format(x))

num_train_examples = len(x_train_filenames)
num_eval_examples = len(x_eval_filenames)
#%%

#Creating dataset from all of the pathnames.

img_shape = (3296, 3008, 1)
batch_size = 3
epochs = 5
threads = 5

def _process_pathnames(img_name, lbl_name):
    img_str = tf.read_file(img_name)
    img = tf.image.decode_bmp(img_str)

    lbl_str = tf.read_file(lbl_name)
    lbl = tf.image.decode_bmp(lbl_str)

    return img, lbl

training_dataset = tf.data.Dataset.from_tensor_slices((x_train_filenames, y_train_filenames))
training_dataset = training_dataset.map(_process_pathnames, num_parallel_calls=threads)
training_dataset = training_dataset.shuffle(num_train_examples)
training_dataset = training_dataset.repeat().batch(batch_size)

evaluation_dataset = tf.data.Dataset.from_tensor_slices(((x_eval_filenames, y_eval_filenames)))
evaluation_dataset = evaluation_dataset.map(_process_pathnames, num_parallel_calls=threads)
evaluation_dataset = evaluation_dataset.shuffle(num_eval_examples)
evaluation_dataset = evaluation_dataset.repeat().batch(batch_size)
#%%

#Defining model

def conv_block(input_tensor, num_filters):
      encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
      encoder = layers.BatchNormalization()(encoder)
      encoder = layers.Activation('relu')(encoder)
      encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder)
      encoder = layers.BatchNormalization()(encoder)
      encoder = layers.Activation('relu')(encoder)
      return encoder

def encoder_block(input_tensor, num_filters):
      encoder = conv_block(input_tensor, num_filters)
      encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)

      return encoder_pool, encoder

def decoder_block(input_tensor, concat_tensor, num_filters):
      decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor)
      decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
      decoder = layers.BatchNormalization()(decoder)
      decoder = layers.Activation('relu')(decoder)
      decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
      decoder = layers.BatchNormalization()(decoder)
      decoder = layers.Activation('relu')(decoder)
      decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
      decoder = layers.BatchNormalization()(decoder)
      decoder = layers.Activation('relu')(decoder)
      return decoder

inputs = layers.Input(shape=img_shape)
encoder0_pool, encoder0 = encoder_block(inputs, 32)
encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64)
encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128)
encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256)
encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512)
center = conv_block(encoder4_pool, 1024)
decoder4 = decoder_block(center, encoder4, 512)
decoder3 = decoder_block(decoder4, encoder3, 256) 
decoder2 = decoder_block(decoder3, encoder2, 128)
decoder1 = decoder_block(decoder2, encoder1, 64)
decoder0 = decoder_block(decoder1, encoder0, 32)
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
model = models.Model(inputs=[inputs], outputs=[outputs])
#%%

#Defining custom loss functions

def dice_coeff(y_true, y_pred):
    smooth = 1.
    # Flatten
    y_true_f = tf.reshape(y_true, [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
    return score

def dice_loss(y_true, y_pred):
    loss = 1 - dice_coeff(y_true, y_pred)
    return loss

def bce_dice_loss(y_true, y_pred):
    loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
    return loss

model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_loss])
save_model_path = '/tmp/weights.hdf5'
cp = tf.keras.callbacks.ModelCheckpoint(filepath=save_model_path, monitor='val_dice_loss', save_best_only=True, verbose=1)
#%%

#Training the model

history = model.fit(training_dataset, 
                   steps_per_epoch=int(np.ceil(num_train_examples / float(batch_size))),
                   epochs=epochs,
                   validation_data=evaluation_dataset,
                   validation_steps=int(np.ceil(num_eval_examples / float(batch_size))),
                   callbacks=[cp])

Complete error message:

Traceback (most recent call last):

  File "<ipython-input-19-f1dcac0996cd>", line 1, in <module>
    runfile('//upb.lv/usr/profiles/Peteris.Zvejnieks/Desktop/Tests/Train data/A thing_retry.py', wdir='//upb.lv/usr/profiles/Peteris.Zvejnieks/Desktop/Tests/Train data')

  File "C:ProgramDataAnaconda3libsite-packagesspyder_kernelscustomizespydercustomize.py", line 786, in runfile
    execfile(filename, namespace)

  File "C:ProgramDataAnaconda3libsite-packagesspyder_kernelscustomizespydercustomize.py", line 110, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)

  File "//upb.lv/usr/profiles/Peteris.Zvejnieks/Desktop/Tests/Train data/A thing_retry.py", line 159, in <module>
    callbacks=[cp])

  File "C:ProgramDataAnaconda3libsite-packagestensorflowpythonkerasenginetraining.py", line 880, in fit
    validation_steps=validation_steps)

  File "C:ProgramDataAnaconda3libsite-packagestensorflowpythonkerasenginetraining_arrays.py", line 266, in model_iteration
    batch_outs = f(actual_inputs)

  File "C:ProgramDataAnaconda3libsite-packagestensorflowpythonkerasbackend.py", line 3076, in __call__
    run_metadata=self.run_metadata)

  File "C:ProgramDataAnaconda3libsite-packagestensorflowpythonclientsession.py", line 1439, in __call__
    run_metadata_ptr)

  File "C:ProgramDataAnaconda3libsite-packagestensorflowpythonframeworkerrors_impl.py", line 528, in __exit__
    c_api.TF_GetCode(self.status.status))

InvalidArgumentError: Number of channels must be 1, 3 or 4, was 0
     [[{{node DecodeBmp_1}}]]
     [[{{node IteratorGetNext_13}}]]   
Asked By: Peter


Answers:

Resolved it myself: it appears that tensorflow.image.decode_bmp(…) is faulty, so I switched my project entirely to PNG, and that works just fine. As an added bonus, I reduced the size of my data significantly without losing any precious detail. However, I now understand that what I am asking of my computer is extremely resource-demanding.
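For reference, the only part of the input pipeline that should need to change is the decode call; a minimal sketch, assuming the files were re-exported as PNG and the .bmp extensions in the path lists changed to .png:

def _process_pathnames(img_name, lbl_name):
    img_str = tf.read_file(img_name)
    # decode_png takes an explicit channel count, so the grey-scale
    # images come out with shape (height, width, 1)
    img = tf.image.decode_png(img_str, channels=1)

    lbl_str = tf.read_file(lbl_name)
    lbl = tf.image.decode_png(lbl_str, channels=1)

    return img, lbl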

Answered By: Peter

I had the same exact situation but with the following error message:

InvalidArgumentError: channels must be 0, 3 or 4 for BMP, but got 1
[[{{node decode_image/DecodeImage}}]] [Op:IteratorGetNext]

I had to convert 14k images from BMP to JPG 🙁 but then it worked.
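In case it helps anyone, a bulk conversion like that can be scripted with Pillow; a rough sketch with hypothetical folder names:

from pathlib import Path
from PIL import Image

src = Path('bmp_images')   # hypothetical input folder
dst = Path('jpg_images')   # hypothetical output folder
dst.mkdir(exist_ok=True)

for bmp in src.glob('*.bmp'):
    # convert('L') keeps the result single-channel grey-scale
    Image.open(bmp).convert('L').save(dst / (bmp.stem + '.jpg'))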

Answered By: AureliuS

I had the exact same situation when training a CNN to classify some images (BMP and grey-scale).

tf.keras.utils.image_dataset_from_directory(…, color_mode="grayscale", …)

Rules regarding number of channels in the yielded images:
if color_mode is grayscale, there is 1 channel in the image tensors.
if color_mode is rgb, there are 3 channels in the image tensors.
if color_mode is rgba, there are 4 channels in the image tensors.

With color_mode="grayscale" and BMP input this produces:

InvalidArgumentError: channels must be 0, 3 or 4 for BMP, but got 1 [[{{node decode_image/DecodeImage}}]] [Op:IteratorGetNext]
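The same failure can be reproduced outside the input pipeline, because asking for grayscale makes the generic decoder request 1 channel, which its BMP path does not accept; a minimal sketch with a hypothetical file name:

import tensorflow as tf

raw = tf.io.read_file('some_image.bmp')    # hypothetical grey-scale BMP
# requesting 1 channel triggers the InvalidArgumentError above for BMP input
img = tf.io.decode_image(raw, channels=1)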

Is there any method to solve this situation without converting my images to jpg?

Answered By: 张伟彬