GradientTape in eager mode

GradientTape cannot compute the gradients for my model. How can I debug this code?

import tensorflow as tf
from tensorflow import keras

# loss_tracker is defined outside this snippet; a keras.metrics.Mean is assumed here.
loss_tracker = keras.metrics.Mean(name="loss")

class Training(keras.Model):

  def __init__(self, model):
    super(Training, self).__init__()
    self.model = model

  def compute_loss(self, texts, labels):
    texts = tf.math.l2_normalize(texts, axis=0)
    losses = tf.Variable(tf.zeros_like(labels, dtype=tf.float32), trainable=True, dtype=tf.float32)

    for index, label in enumerate(labels):
      # Split the batch into same-label (positive) and different-label (negative) examples.
      pos_pairs = texts[labels == label]
      neg_pairs = texts[labels != label]

      if len(pos_pairs) > 1:
        p_list = tf.Variable(tf.zeros(pos_pairs.shape[0], dtype=tf.float32), trainable=True, dtype=tf.float32)
        i = 0
        for pos_pair in pos_pairs:
          p_list[i].assign(keras.losses.cosine_similarity(texts[index], pos_pair))
          i += 1
        # Softmax over the positive similarities, then negative log-likelihood.
        p_list = tf.exp(p_list)
        p_list = p_list / tf.reduce_sum(p_list)
        p_loss = tf.reduce_sum(-tf.math.log(p_list))
      else:
        p_loss = 0.0

      if len(neg_pairs) > 1:
        n_list = tf.Variable(tf.zeros(neg_pairs.shape[0], dtype=tf.float32), trainable=True, dtype=tf.float32)
        i = 0
        for neg_pair in neg_pairs:
          n_list[i].assign(keras.losses.cosine_similarity(texts[index], neg_pair))
          i += 1
        # Softmax over the negative similarities, with the opposite sign.
        n_list = tf.exp(n_list)
        n_list = n_list / tf.reduce_sum(n_list)
        n_loss = tf.reduce_sum(tf.math.log(n_list))
      else:
        n_loss = 0.0

      loss_on_sentence = p_loss + n_loss
      losses[index].assign(loss_on_sentence)

    loss = tf.reduce_mean(losses)
    return loss

  def train_step(self, data):
    texts = data[0]
    labels = data[1]
    #print(labels, texts)

    with tf.GradientTape() as tape:
      texts = self.model(texts)
      loss = self.compute_loss(texts, labels)
      print(loss)

    trainable_vars = self.trainable_variables
    #print(trainable_vars)
    gradients = tape.gradient(loss, trainable_vars)
    print(gradients)
    self.optimizer.apply_gradients(zip(gradients, trainable_vars))

    loss_tracker.update_state(loss)
    return {"loss": loss_tracker.result()}

  @property
  def metrics(self):
    return [loss_tracker]


# model, train_dataset, and tensorboard_callback are defined elsewhere.
trainer = Training(model)
trainer.compile(optimizer='adam', run_eagerly=True)
trainer.fit(train_dataset, callbacks=[tensorboard_callback])

Error:
ValueError: No gradients provided for any variable: ['dense1/kernel:0', 'dense1/bias:0', 'dense2/kernel:0', 'dense2/bias:0', 'bn1/gamma:0', 'bn1/beta:0', 'dense3/kernel:0', 'dense3/bias:0', 'dense4/kernel:0', 'dense4/bias:0', 'bn2/gamma:0', 'bn2/beta:0', 'dense5/kernel:0', 'dense5/bias:0', 'dense6/kernel:0', 'dense6/bias:0', 'bn3/gamma:0', 'bn3/beta:0', 'dense7/kernel:0', 'dense7/bias:0', 'dense8/kernel:0', 'dense8/bias:0', 'bn4/gamma:0', 'bn4/beta:0', 'dense9/kernel:0', 'dense9/bias:0', 'bn5/gamma:0', 'bn5/beta:0'].

Can anyone help me debug this error?

Thanks in advance.

Which part breaks the gradient flow? This kind of code works in PyTorch, but here the flow breaks somewhere and I cannot figure out which part causes the problem.
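
For reference, here is a minimal sketch of what I suspect is going on (the tensors x, buf, and vals are made up for illustration, not taken from my model): writing intermediate results into a tf.Variable with assign() inside the tape seems to disconnect the result from the inputs, while building the same values with plain tensor ops keeps the gradient path intact.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])

# Pattern used in compute_loss: write results into a tf.Variable via assign().
with tf.GradientTape() as tape:
  tape.watch(x)
  buf = tf.Variable(tf.zeros(3), trainable=True)
  for i in range(3):
    buf[i].assign(x[i] * 2.0)   # assign() is not recorded as an op on x
  loss_assign = tf.reduce_sum(buf)

print(tape.gradient(loss_assign, x))   # None: the tape lost the connection to x

# Same computation built from plain tensor ops instead of assign().
with tf.GradientTape() as tape:
  tape.watch(x)
  vals = tf.stack([x[i] * 2.0 for i in range(3)])
  loss_stack = tf.reduce_sum(vals)

print(tape.gradient(loss_stack, x))    # tf.Tensor([2. 2. 2.], shape=(3,), dtype=float32)

Is this the same thing that is happening with p_list, n_list, and losses in my compute_loss?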