I'm using TensorFlow 2.3.0, but with v2 behaviour disabled. I'm struggling to find a way to differentiate the loss with respect to the input and update the input instead of the model weights. I made the input a tf.Variable, but I always get the error: "zip argument #2 must support iteration". Can anyone give me an example of how to apply gradient descent w.r.t. the input (without changing the network at this step)? Or please help me fix this code:
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# The input x and target y are variables so that x can be updated in place
x = tf.Variable(name="x", initial_value=np.array([[3.0, 3.0], [2.0, 2.0]]).astype(np.float32), dtype=tf.float32)
y = tf.Variable(name="y", initial_value=np.array([[3.0], [4.0]]).astype(np.float32), dtype=tf.float32)
# Weights of the two-layer linear network (kept fixed in this step)
w1 = tf.Variable(tf.ones([2, 3]))
w2 = tf.Variable(tf.ones([3, 1]))
hidden = tf.matmul(tf.transpose(x), w1)
output = tf.matmul(hidden, w2)
# Non-scalar loss; tf.gradients implicitly sums it before differentiating
loss = output - y
optimizer = tf.train.GradientDescentOptimizer(1.0)
# Differentiate w.r.t. the input x, not w.r.t. y or the weights: pass var_list=[x].
# compute_gradients already returns a list of (gradient, variable) pairs,
# so it can be passed to apply_gradients directly; no zip() is needed
gradient = optimizer.compute_gradients(loss, var_list=[x])
minimize = optimizer.apply_gradients(gradient)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("*****before gradient descent*****")
    print("x---\n", x.eval(), "\n", "y---\n", y.eval())
    print("w1---\n", w1.eval(), "\n", "w2---\n", w2.eval())
    # Run one gradient-descent step; this updates x while w1 and w2 stay fixed
    sess.run(minimize)
    x_, y_, w1_, w2_, hidden_, output_, loss_, gradient_ = sess.run([x, y, w1, w2, hidden, output, loss, gradient])
    print("*****after gradient descent*****")
    print("w1---\n", w1_, "\n", "w2---\n", w2_)
    print("x---\n", x_, "\n", "y_---\n", y_)
    print("output_---\n", output_)
    print("hidden_---\n", hidden_)
    print("loss_---\n", loss_, "\n")
    print("gradient_---\n", gradient_, "\n")