Hi,
I’m having trouble migrating this code to TF2. Could someone please help me?
import tensorflow as tf  # TF 1.x

# Placeholders for the input image, labels, and the batch-norm training flag
input_image_size = args.img_resize
x = tf.placeholder(shape=input_shape, dtype=tf.float32, name="x")
y = tf.placeholder(dtype=tf.float32, name="y")
phase = tf.placeholder(tf.bool, name='phase')
global_step = tf.Variable(0, name='global_step', trainable=False)

# Segmentation network plus the two lambda maps for the active contour layer
out_seg, map_lambda1, map_lambda2 = architectures.ddunet(x, phase)
y_out_dl = tf.round(out_seg)
x_acm = x[:, :, :, 0]
rounded_seg_acl = y_out_dl[:, :, :, 0]

# Distance transform of the rounded segmentation, computed in NumPy via py_func
dt_trans = tf.py_func(my_func, [rounded_seg_acl], tf.float32)
dt_trans.set_shape([args.batch_size, input_image_size, input_image_size])
phi_out, _, lambda1_tr, lambda2_tr = tf.map_fn(
    fn=active_contour_layer,
    elems=(x_acm, dt_trans, map_lambda1[:, :, :, 0], map_lambda2[:, :, :, 0]))

# Soft Dice loss plus L2 regularization
Dice = dice_soft(out_seg, y)
seg_loss = 1 - Dice
l2_loss = tf.losses.get_regularization_loss()
seg_loss += l2_loss
total_loss = seg_loss
rounded_seg = tf.round(out_seg)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
Dice_hard = dice_hard(out_seg, y)

# Adam with exponential learning-rate decay; the batch-norm update ops
# must run before each training step
adaptive_lr = tf.train.exponential_decay(args.lr, global_step, 100000, 0.96,
                                         staircase=False, name=None)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(adaptive_lr).minimize(
        total_loss, global_step=global_step,
        colocate_gradients_with_ops=True, name='train_op')
The problem is the last line. I know it should be changed to something like this:
with tf.control_dependencies(update_ops):
    train_op = tf.optimizers.Adam().minimize(
        total_loss, var_list=???, tape=tf.GradientTape(persistent=False))
But I’m not sure what to put for var_list.
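My current understanding is that var_list should be the list of trainable variables, and that update_ops is no longer needed in TF2 because batch-norm updates run automatically when the layers are called with training=True. Here is a minimal sketch of what I have in mind; `model` is a placeholder name for a tf.keras.Model that would wrap architectures.ddunet (I haven't written that wrapper yet), and dice_soft is my existing loss helper:

import tensorflow as tf

# LR schedule mirroring the tf.train.exponential_decay call above
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=args.lr, decay_steps=100000,
    decay_rate=0.96, staircase=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

@tf.function
def train_step(x_batch, y_batch):
    with tf.GradientTape() as tape:
        # training=True replaces the old `phase` placeholder; batch-norm
        # updates now happen automatically, so no control_dependencies
        out_seg, map_lambda1, map_lambda2 = model(x_batch, training=True)
        total_loss = 1 - dice_soft(out_seg, y_batch)
        if model.losses:  # the layers' L2 regularization terms, if any
            total_loss += tf.add_n(model.losses)
    grads = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return total_loss

If the one-line minimize form is kept instead, I believe it would be optimizer.minimize(total_loss, var_list=model.trainable_variables, tape=tape), with the loss computed under the tape. Is model.trainable_variables the right thing to pass for var_list here?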
Thanks a lot for your help!