from keras import backend as K
from keras.layers import Layer, Dense
from keras import regularizers
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
import tensorflow as tf
class ArcFace(Layer):
    def __init__(self, n_classes=14, s=16.0, m=0.20, regularizer=None, **kwargs):
        super(ArcFace, self).__init__(**kwargs)
        self.n_classes = n_classes
        self.s = s
        self.m = m
        self.regularizer = regularizers.get(regularizer)

    def build(self, input_shape):
        super(ArcFace, self).build(input_shape[0])
        self.W = self.add_weight(name='W',
                                 shape=(input_shape[0][-1], self.n_classes),
                                 initializer='glorot_uniform',
                                 trainable=True,
                                 regularizer=self.regularizer)

    def call(self, inputs):
        x, y = inputs
        # normalize features
        x = tf.nn.l2_normalize(x, axis=1)
        # normalize weights
        W = tf.nn.l2_normalize(self.W, axis=0)
        # cosine similarity between features and class weights
        logits = x @ W
        # add the angular margin; clip logits so the acos gradient
        # does not divide by zero at +/-1 in the backward pass
        theta = tf.acos(K.clip(logits, -1.0 + K.epsilon(), 1.0 - K.epsilon()))
        target_logits = tf.cos(theta + self.m)
        # equivalent expansion: cos(theta + m) = cos(theta)cos(m) - sin(theta)sin(m)
        # sin_theta = tf.sqrt(1.0 - tf.square(logits))
        # target_logits = logits * tf.cos(self.m) - sin_theta * tf.sin(self.m)
        # apply the margin only to the target class
        logits = logits * (1 - y) + target_logits * y
        # feature re-scale
        logits *= self.s
        out = tf.nn.softmax(logits)
        return out

    def compute_output_shape(self, input_shape):
        return (None, self.n_classes)
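For context, this is how I understand the layer's interface: it takes two inputs, the embedding batch and the one-hot labels, and returns class probabilities. A quick standalone check of that assumption (the batch size of 8 and embedding size of 512 are arbitrary, just for illustration):

import numpy as np

# Standalone check of the layer interface; 8 and 512 are arbitrary choices.
feat = tf.constant(np.random.randn(8, 512), dtype=tf.float32)
labels = tf.one_hot(np.random.randint(0, 14, size=8), depth=14)
probs = ArcFace(n_classes=14, s=16.0, m=0.20)([feat, labels])
print(probs.shape)  # expected: (8, 14)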
root_dir = "/content/gdrive/MyDrive/dataset"
input_shape = (160, 160, 3)  # must match the model input shape below
# Create an ImageDataGenerator object to perform data augmentation and rescaling
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    preprocessing_function=preprocess_input,
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    vertical_flip=True,
    validation_split=0.2
)
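One thing I am not sure about here: as far as I know, inception_resnet_v2's preprocess_input already maps pixels from [0, 255] to [-1, 1], so combining it with rescale=1./255 may double-scale the inputs. A quick way to check that assumption:

import numpy as np
from keras.applications.inception_resnet_v2 import preprocess_input

sample = np.array([[0.0, 127.5, 255.0]])
print(preprocess_input(sample))  # expected roughly: [[-1. 0. 1.]]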
# Create a training set generator
train_generator = datagen.flow_from_directory(
    root_dir,
    target_size=input_shape[:2],
    batch_size=32,
    class_mode='categorical',
    subset='training',
    shuffle=True
)
# Create a validation set generator
val_generator = datagen.flow_from_directory(
    root_dir,
    target_size=input_shape[:2],
    batch_size=32,
    class_mode='categorical',
    subset='validation',
    shuffle=True
)

def create_facenet_model(input_shape=(160, 160, 3)):
    # Define input tensors
    input_layer = tf.keras.layers.Input(shape=input_shape)
    label_layer = tf.keras.layers.Input(shape=(14,))
    # Load pre-trained Inception-ResNet-v2 model
    base_model = InceptionResNetV2(weights='imagenet', include_top=False, input_tensor=input_layer)
    # Freeze all but the last 10 layers of the base model
    for layer in base_model.layers[:-10]:
        layer.trainable = False
    # Add custom top layers for transfer learning
    x = base_model.output
    x = tf.keras.layers.GlobalAveragePooling2D()(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Flatten()(x)
    # x = tf.keras.layers.Dense(512, activation='relu')(x)
    x = Dense(512, kernel_initializer='he_normal')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    regularizer = tf.keras.regularizers.l2(0.01)
    # Apply ArcFace layer to get final predictions
    predictions = ArcFace(n_classes=14, regularizer=regularizer)([x, label_layer])
    # Create transfer learning model
    model = tf.keras.models.Model(inputs=[input_layer, label_layer], outputs=predictions)
    return model
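To sanity-check the wiring, the model should take two inputs (images and one-hot labels) and produce a 14-way output; an illustrative check (this builds a second throwaway model just for the printout):

# Structural check: two inputs (images, one-hot labels), 14-way output.
check = create_facenet_model()
print([tuple(i.shape) for i in check.inputs])  # [(None, 160, 160, 3), (None, 14)]
print(check.output_shape)                      # (None, 14)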
# Create an instance of the FaceNet model
model = create_facenet_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Extract input tensor x and target tensor y from training set
for x, y in train_generator:
    break  # Only extract one batch for demonstration purposes
print(x, y)
# Extract input tensor x and target tensor y from validation set
for x, y in val_generator:
    break  # Only extract one batch for demonstration purposes
print(x, y)
# Train the model with increasing number of epochs
history = model.fit([x, y],
                    epochs=30,
                    validation_data=([x, y], y))

# Plot the training and validation accuracy and loss
plot_history(history)
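plot_history itself is not shown above; for completeness, a minimal sketch of the kind of helper I mean (hypothetical, unrelated to the error):

import matplotlib.pyplot as plt

def plot_history(history):
    # Plot training/validation accuracy and loss from a Keras History object.
    for metric in ("accuracy", "loss"):
        plt.figure()
        plt.plot(history.history[metric], label="train " + metric)
        plt.plot(history.history["val_" + metric], label="val " + metric)
        plt.xlabel("epoch")
        plt.ylabel(metric)
        plt.legend()
    plt.show()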
This is my code. I am trying to implement ArcFace with Inception-ResNet-v2 using transfer learning on my dataset of 14 classes, but I am getting the error below during training. Can someone help me with this error?
Epoch 1/30
ValueError Traceback (most recent call last)
in <cell line: 2>()
1 # Train the model with increasing number of epochs
----> 2 history = model.fit([x,y],
3 epochs=30,
4 validation_data=([x,y],y))
5
1 frames
/usr/local/lib/python3.9/dist-packages/keras/engine/training.py in tf__train_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1284, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1268, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1249, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1052, in train_step
self._validate_target_and_loss(y, loss)
File "/usr/local/lib/python3.9/dist-packages/keras/engine/training.py", line 1006, in _validate_target_and_loss
raise ValueError(
ValueError: Target data is missing. Your model was compiled with loss=categorical_crossentropy, and therefore expects target data to be provided in `fit()`.
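My current reading of the message is that fit() wants the targets passed explicitly, because in this architecture the one-hot labels are a model input rather than just the target. Something like the call below is what I would try next (an untested assumption on my part, reusing the single extracted batch):

# Untested assumption: pass the labels both as the second model input
# and as the target; x, y are the single batch extracted above.
history = model.fit([x, y], y,
                    epochs=30,
                    validation_data=([x, y], y))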