Accuracy and Validation Accuracy don't increase

I have a network that is supposed to recognize two overlapping rectangles. However, neither the training accuracy nor the validation accuracy increases.
Here is my code:

# %%
import tensorflow as tf
train_dir = 'dataset/train'
test_dir = 'dataset/test'

# %%
width, height = 86, 86
# Training generator: rescaled + light augmentation, 80% of train_dir
# (validation_split=0.2, subset="training")
training = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1/255.0,
    rotation_range=2,
    horizontal_flip=True,
    zoom_range=0.1,
    vertical_flip=True,
    validation_split=0.2,
).flow_from_directory(
    train_dir,
    class_mode='binary',
    batch_size=32,
    target_size=(width, height),
    subset='training',
)

# Test generator: rescaling only; shuffle=False keeps predictions aligned with files
testing = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1/255.0,
).flow_from_directory(
    test_dir,
    class_mode='binary',
    batch_size=8,
    shuffle=False,
    target_size=(width, height),
)

# Validation generator: a second, independent generator over train_dir,
# with its own augmentation and its own 5% split (subset='validation')
validing = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1/255.0,
    rotation_range=7,
    horizontal_flip=True,
    validation_split=0.05,
).flow_from_directory(
    train_dir,
    batch_size=8,
    class_mode='binary',
    target_size=(width, height),
    subset='validation',
    shuffle=True,
)

# %%
# Print dataset sizes and per-class sample counts
import numpy as np

print("Training set size:", training.samples)
print("Training class counts:", np.bincount(training.classes))
print("Validation set size:", validing.samples)
print("Validation class counts:", np.bincount(validing.classes))

# Visualize some samples
import matplotlib.pyplot as plt

def show_samples(generator, title):
    plt.figure(figsize=(10, 5))
    for i in range(8):
        imgs, labels = next(generator)
        plt.subplot(2, 4, i+1)
        plt.imshow(imgs[0])
        plt.title(f'Class: {labels[0]}')
    plt.suptitle(title)
    plt.show()

show_samples(training, "Training Samples")
show_samples(validing, "Validation Samples")

# %%
from keras.models import Sequential, Model
from keras.layers import (Dense, Flatten, Conv2D, MaxPooling2D, Dropout,
                          BatchNormalization, Activation, GlobalMaxPooling2D)
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau

# %%
optimizer = tf.keras.optimizers.legacy.Adam  # class reference; compile() below uses the "adam" string instead
EarlyStop = EarlyStopping(patience=10, restore_best_weights=True)
Reduce_LR = ReduceLROnPlateau(monitor='val_loss', patience=5, factor=0.5, min_lr=1e-6, verbose=1)
callback = [EarlyStop, Reduce_LR]


# %%
from tensorflow.keras.layers import BatchNormalization, Dropout
from tensorflow.keras.regularizers import l2

# NOTE: these constants are defined but not referenced by the model below
num_classes = 2
num_detectors = 32
l2_reg = 0.02  # increased from 0.01

network = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu',
           input_shape=(86, 86, 3), padding='same'),
    Conv2D(32, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # Dropout(0.1),

    Conv2D(64, (3, 3), activation='relu', padding='same'),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # Dropout(0.1),

    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # Dropout(0.1),

    Flatten(),
    Dense(516, activation='relu'),
    # Dropout(0.1),

    Dense(128, activation='relu'),
    Dropout(0.4),

    Dense(1, activation='sigmoid'),

    # A simpler architecture I also tried:
    # Conv2D(32, (3, 3), activation='relu', input_shape=(width, height, 3)),
    # MaxPooling2D(),
    # BatchNormalization(),
    # Conv2D(64, (3, 3), activation='relu'),
    # MaxPooling2D(),
    # BatchNormalization(),
    # Conv2D(128, (3, 3), activation='relu'),
    # MaxPooling2D(),
    # BatchNormalization(),
    # Flatten(),
    # Dense(128, activation='relu'),
    # Dropout(0.5),
    # Dense(1, activation='sigmoid')  # for binary classification
])

# %%

network.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# %%
network.summary()

# %%
# scipy must be installed for ImageDataGenerator's rotation/zoom transforms
import scipy
print(scipy.__version__)

# %%
from PIL import Image
from tensorflow.keras.preprocessing.image import load_img

# %%
history = network.fit(training, validation_data=validing, epochs=30, callbacks=callback, verbose=2)

# %%
loss, acc = network.evaluate(testing)  # evaluate() returns [loss, accuracy]

# %%
import matplotlib.pyplot as plt

metrics = history.history
plt.plot(history.epoch, metrics['loss'])
plt.legend(['loss'])
plt.show()

# %%
network.save('eyes.h5')

Positive example: pentagons-0727 (hosted at ImgBB)
Negative example: single-pentagon-191 (hosted at ImgBB)

I have 700 images for the positive and negative cases, and I have already tried varying the complexity of my model in several ways.

Please help me out! I've been struggling with this for days.

Hi @acorro, since there are only 700 samples and you are splitting them into training and validation sets, the training set ends up with fewer than 700 samples, which may be too few for the model to learn from. Could you please try data augmentation to increase the number of training samples and see whether the model's accuracy improves? Thank you.
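In case it helps, here is a minimal sketch of offline augmentation that writes extra copies into the same dataset/train/<class>/ folders used above, so flow_from_directory picks them up afterwards. The transform settings, the copies_per_image count, and the aug_ file prefix are illustrative assumptions, not values from your post:

import os
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img, img_to_array, array_to_img

# Illustrative augmentation settings (not taken from the original code)
augmenter = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    vertical_flip=True,
)

train_dir = 'dataset/train'
copies_per_image = 2  # hypothetical: writes 2 augmented variants per original

for class_name in os.listdir(train_dir):
    class_dir = os.path.join(train_dir, class_name)
    if not os.path.isdir(class_dir):
        continue
    for fname in os.listdir(class_dir):
        # Load one image as an array and save random transforms of it back
        # into the same class folder, keeping the label structure intact.
        x = img_to_array(load_img(os.path.join(class_dir, fname),
                                  target_size=(86, 86)))
        for i in range(copies_per_image):
            aug = augmenter.random_transform(x)
            array_to_img(aug).save(os.path.join(class_dir, f'aug_{i}_{fname}'))

One caveat: re-running this cell will also augment the previously written aug_ files, so it is best to start from a fresh copy of the folder (or skip filenames that already start with aug_).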