I am training my model, which uses a 3D U-Net architecture, on Google Colab with 51 GB of system RAM and an NVIDIA V100 GPU with 16 GB of memory.
The dataset consists of grayscale MRI images in NIfTI format, with size (1, 240, 240, 160, 1). I use a batch size of 1 to see if it solves the problem (it doesn't).
Here is my code:
import os

import nibabel as nib
import numpy as np
import tensorflow as tf

def load_nifti_image(filepath):
    # Load a NIfTI file and return its voxel data as a NumPy array.
    nifti = nib.load(filepath)
    volume = nifti.get_fdata()
    return volume

# -----------------------TRAIN-----------------------
nifti_files = [os.path.join("/content/drive/MyDrive/Interpolated/train/images", f) for f in os.listdir("/content/drive/MyDrive/Interpolated/train/images") if f.endswith('.nii.gz')]
mask_files = [os.path.join("/content/drive/MyDrive/Interpolated/train/masks", f) for f in os.listdir("/content/drive/MyDrive/Interpolated/train/masks") if f.endswith('.nii.gz')]

# Load every training volume into memory and add a trailing channel axis.
nifti_images = [load_nifti_image(f) for f in nifti_files]
nifti_masks = [load_nifti_image(f) for f in mask_files]
final_nifti_images = [np.expand_dims(image, axis=-1) for image in nifti_images]
final_nifti_masks = [np.expand_dims(mask, axis=-1) for mask in nifti_masks]
dataset = tf.data.Dataset.from_tensor_slices((final_nifti_images, final_nifti_masks))

# -----------------------VALIDATION-----------------------
nifti_files_val = [os.path.join("/content/drive/MyDrive/Interpolated/validation/images", f) for f in os.listdir("/content/drive/MyDrive/Interpolated/validation/images") if f.endswith('.nii.gz')]
mask_files_val = [os.path.join("/content/drive/MyDrive/Interpolated/validation/masks", f) for f in os.listdir("/content/drive/MyDrive/Interpolated/validation/masks") if f.endswith('.nii.gz')]
nifti_images_val = [load_nifti_image(f) for f in nifti_files_val]
nifti_masks_val = [load_nifti_image(f) for f in mask_files_val]
final_nifti_images_val = [np.expand_dims(image, axis=-1) for image in nifti_images_val]
final_nifti_masks_val = [np.expand_dims(mask, axis=-1) for mask in nifti_masks_val]
dataset_val = tf.data.Dataset.from_tensor_slices((final_nifti_images_val, final_nifti_masks_val))

# Batch both datasets with batch size 1 and train for 100 epochs.
dataset = dataset.batch(1)
dataset_val = dataset_val.batch(1)
test_model.fit(dataset, validation_data=dataset_val, epochs=100)
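For reference, get_fdata() returns float64 by default, so a single 240 x 240 x 160 volume already takes on the order of 70 MB of host RAM before any batching. A quick check like this (illustrative snippet, not part of the script above) shows the per-volume footprint:

sample = load_nifti_image(nifti_files[0])
print(sample.shape, sample.dtype)  # raw volume shape and dtype (float64 by default)
print(sample.nbytes / 1e6, "MB per volume in host RAM")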
I tried limiting the GPU memory to 14 GB, which does not work either, and also reducing the batch size. Note that I cannot change the model, since I am using a predefined 3D U-Net; I will paste it here too in case it helps:
from tensorflow.keras.layers import (Activation, BatchNormalization, Concatenate, Conv3D, Conv3DTranspose, Input, MaxPool3D)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# Convolutional Block: two 3x3x3 convolutions, each followed by batch norm and ReLU.
def conv_block(inputs, num_filters):
    x = Conv3D(num_filters, (3, 3, 3), padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv3D(num_filters, (3, 3, 3), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

# Encoder Block: conv block, then 2x2x2 max pooling; returns the pre-pooling
# features (for the skip connection) and the pooled output.
def encoder_block(inputs, num_filters):
    x = conv_block(inputs, num_filters)
    p = MaxPool3D((2, 2, 2), padding="same")(x)
    return x, p

# Decoder Block: upsample with a transposed convolution, concatenate the skip
# connection, then apply another conv block.
def decoder_block(inputs, skip, num_filters):
    x = Conv3DTranspose(num_filters, (2, 2, 2), strides=2, padding="same")(inputs)
    x = Concatenate()([x, skip])
    x = conv_block(x, num_filters)
    return x

# UNET
def unet(input_shape):
    inputs = Input(input_shape)

    # ----ENCODER----
    s1, p1 = encoder_block(inputs, 64)
    s2, p2 = encoder_block(p1, 128)
    s3, p3 = encoder_block(p2, 256)
    s4, p4 = encoder_block(p3, 512)

    # ----BRIDGE----
    b1 = conv_block(p4, 1024)

    # ----DECODER----
    d1 = decoder_block(b1, s4, 512)
    d2 = decoder_block(d1, s3, 256)
    d3 = decoder_block(d2, s2, 128)
    d4 = decoder_block(d3, s1, 64)

    # 1x1x1 convolution with a sigmoid activation for binary segmentation.
    outputs = Conv3D(1, 1, padding="same", activation="sigmoid")(d4)

    model = Model(inputs, outputs, name="UNET")
    return model
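# (Illustrative sanity check, not part of the original script.) Rough size of one
# full-resolution feature map produced by the first encoder level, in float32:
# 240 x 240 x 160 voxels times 64 filters times 4 bytes.
first_level_activation_bytes = 240 * 240 * 160 * 64 * 4
print(first_level_activation_bytes / 1e9, "GB per feature map")  # roughly 2.36 GB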
input_shape = (240, 240, 160, 1)
test_model = unet(input_shape)
optimizer = Adam(learning_rate=0.0001)
test_model.compile(optimizer=optimizer, loss=dice_coefficient_loss, metrics=[dice_coefficient])
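dice_coefficient_loss and dice_coefficient are not shown above; they follow the usual soft-Dice formulation, roughly along these lines (illustrative sketch, the exact smoothing constant may differ):

def dice_coefficient(y_true, y_pred, smooth=1.0):
    # Flatten both volumes and compute the soft Dice overlap.
    y_true_f = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    y_pred_f = tf.reshape(tf.cast(y_pred, tf.float32), [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def dice_coefficient_loss(y_true, y_pred):
    return 1.0 - dice_coefficient(y_true, y_pred)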