Below is low-level API logistic regression code, which gives the correct output:
import tensorflow as tf

## initialize part
print_cost = True
num_epochs = 10
learning_rate = 0.005
w = tf.Variable(tf.zeros(shape=(X.shape[0], 1)))  # shape=(2,1)
b = tf.Variable(0.0)
optimizer = tf.keras.optimizers.SGD(learning_rate)
accuracy = tf.keras.metrics.BinaryAccuracy()
## training part
for i in range(num_epochs):
    # forward pass
    with tf.GradientTape() as tape:
        z = tf.add(tf.matmul(tf.transpose(w), X), b)
        bce = tf.keras.losses.BinaryCrossentropy(from_logits=True, label_smoothing=0.0,
                                                 axis=-1, reduction='sum',
                                                 name='binary_crossentropy')
        cost = bce(Y, z)
    # backward pass
    trainable_variables = [w, b]
    grads = tape.gradient(cost, trainable_variables)
    optimizer.apply_gradients(zip(grads, trainable_variables))
    # print the cost and accuracy every epoch
    if print_cost:
        accuracy.update_state(Y, tf.sigmoid(z))
        print(f"Epoch {i+1}/{num_epochs} - Cost: {cost.numpy()} - Accuracy: {accuracy.result().numpy()}")
        accuracy.reset_states()
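
For reference, both snippets assume X and Y already exist. A minimal toy setup consistent with the shape=(2,1) comment above (the names, sizes, and labeling rule here are assumptions for illustration, not from the original) would be:

import numpy as np
import tensorflow as tf

# hypothetical data: X is (n_features, m) = (2, 100), Y is (1, m)
rng = np.random.default_rng(0)
m = 100
X_np = rng.normal(size=(2, m)).astype("float32")
Y_np = (X_np[0:1, :] - X_np[1:2, :] > 0).astype("float32")  # linearly separable labels
X = tf.constant(X_np)
Y = tf.constant(Y_np)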
Below is the high-level API code, which needs correction because it gives incorrect output:
# Create the model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(units=1, activation='sigmoid', input_shape=(X.shape[0],),
                                kernel_initializer=tf.keras.initializers.Zeros(),
                                bias_initializer=tf.keras.initializers.Zeros()))
# Compile the model
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.005),
    loss=tf.keras.losses.BinaryCrossentropy(),
    metrics=[tf.keras.metrics.BinaryAccuracy()],
)
# Train the model
model.fit(tf.transpose(X), tf.transpose(Y), epochs=10, shuffle=False, batch_size=1)
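
If the goal is for the high-level run to track the low-level one, note that the two setups differ in more than the API: the loop above takes one full-batch gradient step per epoch on a summed, logits-based loss, while model.fit here takes per-sample steps (batch_size=1) on a mean-reduced loss over sigmoid outputs. A minimal sketch of one way to align them (an assumed diagnosis of the discrepancy, not a confirmed fix) is:

# output raw logits (no sigmoid), as in the low-level forward pass
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units=1, activation=None, input_shape=(X.shape[0],),
                          kernel_initializer=tf.keras.initializers.Zeros(),
                          bias_initializer=tf.keras.initializers.Zeros())
])
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.005),
    # from_logits=True and reduction='sum' mirror the bce object above
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction='sum'),
    # threshold=0.0 on logits is equivalent to 0.5 on sigmoid probabilities
    metrics=[tf.keras.metrics.BinaryAccuracy(threshold=0.0)],
)
# one full-batch update per epoch, matching the single GradientTape step above
model.fit(tf.transpose(X), tf.transpose(Y), epochs=10,
          shuffle=False, batch_size=X.shape[1])

With activation=None the layer returns logits, so the threshold=0.0 accuracy check plays the role of tf.sigmoid(z) with the default 0.5 threshold in the low-level loop.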