ValueError: A `Concatenate` layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape=[(None, 2, 2, 128), (None, 3, 3, 128)]

Dear All, I received the following error, " ValueError: A Concatenate layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape=[(None, 2, 2, 128), (None, 3, 3, 128)]"
I found all the shape of the predecessor shape is matching the following, here comes the entire example would you mind to show me where is the error?

# model with padded convolutions for the fashion mnist dataset
from numpy import mean
from numpy import std
from matplotlib import pyplot
from sklearn.model_selection import KFold       
from keras.datasets import fashion_mnist
from keras.utils import to_categorical  
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense
from keras.layers import Flatten
from keras.optimizers import SGD

import numpy as np
import tensorflow as tf
from tensorflow import keras
# load train and test dataset (ORIGINAL)
def load_dataset():
 # load dataset
 (trainX, trainY), (testX, testY) = tf.keras.datasets.fashion_mnist.load_data() #datasets.cifar100
 # Decrease the number of batches for X and Y train and test.
 trainX = trainX[::4]
 trainX = trainX.repeat(8, axis=1).repeat(8, axis=2)
 trainY = trainY[::4]
 testX = testX[::4]
 testX = testX.repeat(8, axis=1).repeat(8, axis=2)
 testY = testY[::4]
 # reshape dataset to have a single channel
 trainX = trainX.reshape((trainX.shape[0], 224, 224, 1))
 testX = testX.reshape((testX.shape[0], 224, 224, 1))
#  one hot encode target values
#  trainY = np.asarray(trainY).astype('float32').reshape((-1, 1))
#  testY = np.asarray(testY).astype('float32').reshape((-1, 1))

 trainY = to_categorical(trainY, 10)
 testY = to_categorical(testY, 10)
#  trainY = tf.keras.utils.to_categorical(trainY, 10, dtype='int')
#  testY = tf.keras.utils.to_categorical(testY, 10, dtype='int')
#  print(trainX.shape, trainY.shape, testX.shape, testY.shape)
 return trainX, trainY, testX, testY

# scale pixels


# could be converted to ('float32') as well
# scale pixels
def prep_pixels(train, test):
   # convert from integers to floats
 
  train_norm = train.astype('float32')
  test_norm  = test.astype('float32')
  
  
  	# normalize to range 0-1
  train_norm = train_norm / 255.0
  test_norm  = test_norm / 255.0
  	# return normalized images
  return train_norm, test_norm

# plot diagnostic learning curves

def summarize_diagnostics(histories):
	for i in range(len(histories)):
		# plot loss
		pyplot.subplot(211)
		pyplot.title('Cross Entropy Loss')
		pyplot.plot(histories[i].history['loss'], color='blue', label='train')
		pyplot.plot(histories[i].history['val_loss'], color='orange', label='test')
	
		# plot accuracy
		pyplot.subplot(212)
		pyplot.title('Classification Accuracy')
		pyplot.plot(histories[i].history['accuracy'], color='blue', label='train')
		pyplot.plot(histories[i].history['val_accuracy'], color='orange', label='test')
	  
	pyplot.show()

# summarize model performance
def summarize_performance(scores):
	# print summary
	print('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))
	# box and whisker plots of results
	pyplot.boxplot(scores)
	pyplot.show()

import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Conv2D, \
     MaxPooling2D, Flatten, Dense, BatchNormalization, Activation, concatenate, Conv2DTranspose, Dropout

def multi_unet_model(n_classes=10, IMG_HEIGHT=224, IMG_WIDTH=224, IMG_CHANNELS=1):

  #Build the model
    inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
    #s = Lambda(lambda x: x / 255)(inputs)   #No need for this if we normalize our inputs beforehand
    s = inputs

    #Contraction path
    c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(s)
    c1 = Dropout(0.1)(c1)
    c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c1)
    p1 = MaxPooling2D((2, 2))(c1)
    
    c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p1)
    c2 = Dropout(0.1)(c2)
    c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c2)
    p2 = MaxPooling2D((2, 2))(c2)
     
    c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p2)
    c3 = Dropout(0.2)(c3)
    c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c3)
    p3 = MaxPooling2D((2, 2))(c3)
     
    c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p3)
    c4 = Dropout(0.2)(c4)
    c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c4)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)
     
    c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(p4)
    c5 = Dropout(0.3)(c5)
    c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c5)
    
    #Expansive path 
    u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
    c6 = Dropout(0.2)(c6)
    c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
     
    u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
    c7 = Dropout(0.2)(c7)
    c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
     
    u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
    c8 = Dropout(0.1)(c8)
    c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
     
    u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
    c9 = Dropout(0.1)(c9)
    c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
     
    outputs = Conv2D(n_classes, (1, 1), activation='softmax')(c9)
     
    model = Model(inputs=[inputs], outputs=[outputs])
    
    model = Model(inputs=[inputs], outputs=[outputs])
    model.compile(loss="categorical_crossentropy", 
                optimizer=tf.keras.optimizers.Adam()) #sparse_categorical_crossentropy, binary_crossentropy
    # categorical_crossentropy
    # NOTE: Compile the model in the main program to make it easy to test with various loss functions
    # model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    
    # model.summary()
    
    return model

# evaluate a model using k-fold cross-validation

def evaluate_model(dataX, dataY, n_folds=5):
	
  
	scores, histories = list(), list()
	# prepare cross validation
	kfold = KFold(n_folds, shuffle=True, random_state=1)
	# enumerate splits

	
	for train_ix, test_ix in kfold.split(dataX):
		# define model

		# model = unet(pretrained_weights = None,input_size = (28,28,1))
		# model = multi_unet_model(n_classes=10, IMG_HEIGHT=224, IMG_WIDTH=224, IMG_CHANNELS=1)
		  

		model = multi_unet_model(n_classes=10, IMG_HEIGHT=224, IMG_WIDTH=224, IMG_CHANNELS=1)
		# model = build_unet(shape)
 
		# select rows for train and test
		
	
		trainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]
			
	# 	# fit model
		history = model.fit(trainX, trainY, epochs=1, batch_size=6, validation_data=(testX, testY), verbose=1)
	# 	# evaluate model
		_, acc = model.evaluate(testX, testY, verbose=1)
		print('> %.3f' % (acc * 100.0))
	# 	# append scores
		scores.append(acc)
		histories.append(history)
	return scores, histories

# run the test harness for evaluating a model
def run_test_harness():
	# load dataset
	trainX, trainY, testX, testY = load_dataset()
	# prepare pixel data
	trainX, testX = prep_pixels(trainX, testX)
	# evaluate model
	scores, histories = evaluate_model(trainX, trainY)
	# learning curves
	summarize_diagnostics(histories)
	# plot_loss_curves(histories)
	# summarize estimated performance
	summarize_performance(scores)

# entry point, run the test harness
run_test_harness()

The entire error,

"---------------------------------------------------------------------------

ValueError Traceback (most recent call last)

in <cell line: 16>()
14
15 # entry point, run the test harness
—> 16 run_test_harness()

5 frames

/usr/local/lib/python3.10/dist-packages/keras/layers/merging/concatenate.py in build(self, input_shape)
129 )
130 if len(unique_dims) > 1:
→ 131 raise ValueError(err_msg)
132
133 def _merge_function(self, inputs):

ValueError: A Concatenate layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape=[(None, 2, 2, 128), (None, 3, 3, 128)]"

@FALAH_FAKHRI,

Welcome to the Tensorflow Forum!

ValueError: A Concatenate layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape=[(None, 2, 2, 128), (None, 3, 3, 128)]

Could you please share actual standalone code to replicate above issue? I am facing different error while running above code. Please find the gist.

Thank you!