Please help :( model.evaluate() fails when image_dataset_from_directory is used with label_mode=None

I’m trying to run model.evaluate() on a dataset created with preprocessing.image_dataset_from_directory, but it fails because the dataset is built with label_mode=None.

I am trying to reproduce the behaviour of class_mode='input' from ImageDataGenerator.flow_from_directory, where each image is used as its own target. I’ve tried multiple times and keep getting the same error message. The problem is the label_mode=None in my generator code: whenever I change it to something else, the code can’t find the images, and labels='inferred' didn’t help either. Please help.
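To show what I mean, here is roughly what label_mode=None gives me (the directory below is just a placeholder, not my real path):

from tensorflow.keras import preprocessing

images_only = preprocessing.image_dataset_from_directory(
    'some_directory',      # placeholder path
    image_size=(8, 8),
    batch_size=64,
    label_mode=None
)
# Each element is a single batch of images, not an (image, label) pair,
# so fit()/evaluate() have no targets unless I map the dataset myself.
print(images_only.element_spec)

Below is my full code: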

import matplotlib.pyplot as plt
import numpy as np
import random
from PIL import Image
import tensorflow
from tensorflow import keras
from tensorflow.keras import layers, preprocessing, Sequential
from sklearn.neighbors import KernelDensity
import glob

# Thin wrapper around image_dataset_from_directory that also keeps track of label_mode
class CustomImageDataset:
    def __init__(self, directory, image_size, batch_size, label_mode):
        self.dataset = tensorflow.keras.preprocessing.image_dataset_from_directory(
            directory,
            image_size=image_size,
            batch_size=batch_size,
            label_mode=label_mode
        )
        self.label_mode = label_mode

    def __iter__(self):
        return iter(self.dataset)

    def __len__(self):
        return len(self.dataset)

    def map(self, *args, **kwargs):
        return self.dataset.map(*args, **kwargs)

    def batch(self, *args, **kwargs):
        return self.dataset.batch(*args, **kwargs)

    def prefetch(self, *args, **kwargs):
        return self.dataset.prefetch(*args, **kwargs)


SIZE = 8
batch_size = 64

train_generator = preprocessing.image_dataset_from_directory(
    r'C:\Users\{}\Downloads\archive (1)\noncloud_train',    
    image_size=(SIZE, SIZE),
    batch_size=batch_size,
    label_mode=None
)

validation_generator = preprocessing.image_dataset_from_directory(
    r'C:\Users\{}\Downloads\archive (1)\noncloud_test',
    image_size=(SIZE, SIZE),
    batch_size=batch_size,
    label_mode=None
)

anomaly_generator = CustomImageDataset(
    r'C:\Users\{}\Downloads\archive (1)\cloud',
    image_size=(SIZE, SIZE),
    batch_size=batch_size,
    label_mode=None
)



rescaling_layer = layers.Rescaling(1./255)

def change_inputs(images):
    # Rescale to [0, 1] and return the image as both input and target (autoencoder style)
    x = tensorflow.image.resize(
        rescaling_layer(images), [SIZE, SIZE],
        method=tensorflow.image.ResizeMethod.NEAREST_NEIGHBOR
    )
    return x, x


# Apply preprocessing to datasets
train_dataset = train_generator.map(change_inputs)
validation_dataset = validation_generator.map(change_inputs)
anomaly_dataset = anomaly_generator.map(change_inputs)

test = anomaly_generator.label_mode
def check_none_in_dataset(dataset):
    for batch in dataset:
        images, labels = batch
        if images is None or labels is None:
            print("Found None in dataset")
            return True
    print("No None values in dataset")
    return False

# Check validation dataset
print("Checking validation dataset for None values:")
c = check_none_in_dataset(validation_dataset)
print(c)

def print_labels_from_dataset(dataset, num_batches=1):
    for images, labels in dataset.take(num_batches):
        print("Labels (should be the same as images):")
        print(labels.numpy())  # Print the labels to check that they are the expected values (not None)
        print(labels.numpy() == images.numpy())

print("Validation Dataset Labels:")
bat = print_labels_from_dataset(validation_dataset)

print("Anomaly Dataset Labels:")
cow = print_labels_from_dataset(anomaly_dataset)


model = Sequential()
# Encoder
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(SIZE, SIZE, 3)))
model.add(layers.MaxPooling2D((2, 2), padding='same')) 
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2), padding='same'))
model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2), padding='same'))


# Decoder
model.add(layers.Conv2D(16, (3, 3), activation='relu', padding='same'))
model.add(layers.UpSampling2D((2, 2)))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.UpSampling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.UpSampling2D((2, 2)))


model.add(layers.Conv2D(3, (3, 3), activation='sigmoid', padding='same'))


model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse'])
model.summary()

history = model.fit(
    train_dataset,
    steps_per_epoch = 1500 // batch_size,
    epochs = 1000,
    validation_data = validation_dataset,
    validation_steps = 225 // batch_size,
    shuffle = True
)

# Examine the reconstruction error on validation data vs. anomaly images
validation_error = model.evaluate(validation_generator)
anomaly_error = model.evaluate(anomaly_generator)

I think you should pass the preprocessed datasets (validation_dataset and anomaly_dataset from the code above) to model.evaluate() instead of validation_generator and anomaly_generator. The generators were created with label_mode=None, so they yield only images and evaluate() has no targets to compute the loss against; the change_inputs mapping turns each batch into an (image, image) pair, which is exactly what your autoencoder expects.
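For example, a minimal change along those lines (reusing the variables already defined in your code):

# Evaluate on the mapped datasets, which yield (input, target) pairs
validation_error = model.evaluate(validation_dataset)
anomaly_error = model.evaluate(anomaly_dataset)
print('Validation [loss, mse]:', validation_error)
print('Anomaly [loss, mse]:', anomaly_error)

Since the model was compiled with loss='mean_squared_error' and metrics=['mse'], evaluate() returns a [loss, mse] pair for each dataset.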

This should solve your problem. Thanks.