Evaluate U-Net by layer

Question:

I come from a medical background and am a newbie in the machine learning field. I am trying to train a U-Net model for image segmentation using Keras and TensorFlow. However, my loss is NaN and the predictions are all black.

I would like to check the U-Net layer by layer, but I don't know how to feed the data or where to start. By checking each layer, I mean that I want to feed my images to the first layer, see the output of the first layer, then move on to the second layer, and so on until the last layer. I just want to see how the output is produced at each layer and find where the NaN values start. I really appreciate your help.

This is my code.

import os
import matplotlib.pyplot as plt

import tensorflow as tf 
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras


#Constants
SEED = 42 
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_TEST = 16

IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)

data_dir = 'data'
data_dir_train = os.path.join(data_dir, 'training')
data_dir_train_image = os.path.join(data_dir_train, 'img')
data_dir_train_mask = os.path.join(data_dir_train, 'mask')

data_dir_test = os.path.join(data_dir, 'test')
data_dir_test_image = os.path.join(data_dir_test, 'img')
data_dir_test_mask = os.path.join(data_dir_test, 'mask')

NUM_TRAIN = 1413
NUM_TEST = 210

NUM_OF_EPOCHS = 10

def create_segmentation_generator_train(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)

    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)


def create_segmentation_generator_test(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)

    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)


def display(display_list):
    plt.figure(figsize=(15,15))
   
    title = ['Input Image', 'True Mask', 'Predicted Mask']
   
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]), cmap='gray')
    plt.show()


def show_dataset(datagen, num=1):
    for i in range(0,num):
        image,mask = next(datagen)
        display([image[0], mask[0]])


def unet(n_levels, initial_features=32, n_blocks=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1):
    # n_blocks = number of conv layers at each level
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs

    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')

    #downstream
    skips = {}
    for level in range(n_levels):
        for _ in range (n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)
    
    #upstream
    for level in reversed(range(n_levels-1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range (n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)

    #output
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)
    
    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')


EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE_TRAIN
EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE_TEST

model = unet(4)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])


train_generator = create_segmentation_generator_train(data_dir_train_image, data_dir_train_mask, BATCH_SIZE_TRAIN)
test_generator = create_segmentation_generator_test(data_dir_test_image, data_dir_test_mask, BATCH_SIZE_TEST)

model.fit(train_generator, steps_per_epoch=EPOCH_STEP_TRAIN, validation_data=test_generator, validation_steps=EPOCH_STEP_TEST, epochs=NUM_OF_EPOCHS)


def show_prediction(datagen, num=1):
    for i in range(0,num):
        image,mask = next(datagen)
        pred_mask = model.predict(image)[0] > 0.5
        display([image[0], mask[0], pred_mask])


show_prediction(test_generator, 2)
Asked By: jonedabb


Answers:

To investigate your model layer by layer, see the following example of how to print a summary of the model and how to save the model:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# create the input
inputs = keras.Input(shape=(1,))

# create the layers
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)

# compose the model
model = keras.Model(inputs=inputs, outputs=outputs, name="Spesiaali")

# inspect the structure
model.summary()

# save the model for sharing
model.save(".model_to_be_investigated_by_someone_else_to_help_you")

…this makes it possible for you to see the whole model structure when "debugging your AI". If you cannot find the solution yourself, add the last line of the example to your own code, put the resulting folder on e.g. GitHub, and ask someone else to look at the structure of your model to help you solve the problem.
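To actually inspect the output of each layer, as the question asks, you can build a sub-model per layer with the functional API and test its output for NaN values. Below is a minimal sketch, assuming model is the compiled U-Net from the question; the random batch is only a stand-in for a real batch of preprocessed images:

import numpy as np
from tensorflow import keras

# Stand-in input batch (one grayscale 512x512 image); in practice take a real
# batch from the data generator, e.g. image, mask = next(train_generator)
batch = np.random.rand(1, 512, 512, 1).astype('float32')

# For each layer, build a sub-model that ends at that layer and check whether
# its output already contains NaN. The sub-models share weights with model,
# so run this after training has produced the NaN loss.
for layer in model.layers:
    probe = keras.Model(inputs=model.input, outputs=layer.output)
    out = probe.predict(batch, verbose=0)
    print(layer.name, out.shape, 'contains NaN:', np.isnan(out).any())

If every layer output turns out to be finite, the NaN may come from the loss computation rather than the forward pass, for example from mask values outside [0, 1] being fed to binary cross-entropy, so it is worth checking the mask batches too.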

Answered By: Experience_In_AI

The blue drawing illustrates the output of the model.summary() command, and the red line marks the output shape of the first dense layer.
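(The screenshot is not reproduced here. For reference, model.summary() for the example model above prints roughly the following, where (None, 64) on the first dense row is the output shape marked in red; exact layer names depend on the session.)

Model: "Spesiaali"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 input_1 (InputLayer)        [(None, 1)]               0
 dense (Dense)               (None, 64)                128
 dense_1 (Dense)             (None, 64)                4160
 dense_2 (Dense)             (None, 10)                650
=================================================================
Total params: 4,938
Trainable params: 4,938
Non-trainable params: 0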


Answered By: Experience_In_AI