I train a Keras model as follows:
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=n_splits, shuffle=False)
for k, (train_index, test_index) in enumerate(skf.split(inputs, targets)):
    print(f"trait {trait_idx + 1}: {trait_dict[trait]}, fold: {k}")
    x_train, x_test = inputs[train_index], inputs[test_index]
    y_train, y_test = targets[train_index], targets[test_index]
    # convert the integer labels to one-hot encoding
    y_train = tf.keras.utils.to_categorical(y_train, num_classes=n_classes)
    y_test = tf.keras.utils.to_categorical(y_test, num_classes=n_classes)
    # define the neural network architecture
    model = tf.keras.models.Sequential()
    model.add(
        tf.keras.layers.Dense(50, input_dim=hidden_dim, activation="relu")
    )
    model.add(tf.keras.layers.Dense(n_classes))  # no activation, so the outputs are raw logits
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
        metrics=["mse", "accuracy"],
    )
    history = model.fit(
        x_train,
        y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, y_test),
        verbose=0,
    )
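For context, each trained model ends up in a dict keyed by trait, which is what dict_models in the next snippet refers to. Roughly (simplified from my actual code, so the exact keying here is an assumption):

dict_models[trait] = model  # one trained model per trait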
Then I put in some test data to get predictions:
for _trait, _model in dict_models.items():
    script = tf.convert_to_tensor(inputs, dtype=float)
    # call the model directly instead of the removed predict_classes
    _results = _model(script, training=False)
    # _results = _model.predict_classes(script)
    _results_numeric = _results.numpy()
    pred = np.argmax(_results_numeric, axis=1)
    authors = author_ids[0]
    print(f"results for {dict_trait[_trait]}")
    print(f"  {authors[0]}: {pred[0]} : {_results_numeric[0, :]}")
    print(f"  {authors[1]}: {pred[1]} : {_results_numeric[1, :]}")
What I would like from the results is a probability that the class prediction is correct. The scores I am getting are outside the 0 to 1 range, so they are not probabilities. How do I fix this?
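For reference, this is the kind of output I have in mind. I assume that applying tf.nn.softmax to the raw outputs would squash them into the 0 to 1 range (since the model is compiled with from_logits=True), but I am not sure whether that is the right fix or whether I should change the model or the loss instead:

# continuing from the prediction loop above, where _results holds the raw logits
probs = tf.nn.softmax(_results, axis=-1).numpy()  # each row should now sum to 1
pred = np.argmax(probs, axis=1)
print(probs[0, :])  # values between 0 and 1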