I am trying to separate the preprocessing part from the NN layers in my Keras model, but I am getting an error. I am using TF 2.5.1, and my code is based on this TensorFlow blog post: link. I would appreciate any help debugging it. My implementation and the error I am getting are below:
Code:
import numpy as np
import tensorflow as tf

def forward_pass(y, num_classes=3, learn_rate=False):
    ## Model architecture
    initializer = tf.keras.initializers.HeNormal()
    initial_bias = tf.keras.initializers.Constant(-np.log(1 / num_classes))
    if learn_rate:
        learning_rate = 0.001  # note: currently unused inside this function
    #all_features = tf.keras.layers.concatenate(encoded_features, name='concat_features')
    x = tf.keras.layers.Dense(1024, activation='relu', kernel_initializer=initializer, name='dnn_0', dtype='float32')(y)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Dropout(0.5, name='drop_0')(x)
    x = tf.keras.layers.Dense(1024, activation='relu', kernel_initializer=initializer, name='dnn_1', dtype='float32')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Dropout(0.5, name='drop_1')(x)
    x = tf.keras.layers.Dense(512, activation='relu', kernel_initializer=initializer, name='dnn_2', dtype='float32')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Dropout(0.5, name='drop_2')(x)
    x = tf.keras.layers.Dense(512, activation='relu', kernel_initializer=initializer, name='dnn_3', dtype='float32')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Dropout(0.5, name='drop_3')(x)
    x = tf.keras.layers.Dense(256, activation='relu', kernel_initializer=initializer, name='dnn_4', dtype='float32')(x)
    x = tf.keras.layers.BatchNormalization()(x)
    x = tf.keras.layers.Dropout(0.5, name='drop_4')(x)
    x = tf.keras.layers.Dense(64, activation='relu', kernel_initializer=initializer, name='dnn_5', dtype='float32')(x)
    #x = tf.keras.layers.BatchNormalization()(x)
    #x = tf.keras.layers.Dropout(0.5, name='drop_4')(x)
    #x = tf.keras.layers.Dense(32, activation='relu', kernel_initializer=initializer, name='dnn_5', dtype='float32')(x)
    #x = tf.keras.layers.BatchNormalization()(x)
    #x = tf.keras.layers.Dropout(0.5, name='drop_5')(x)
    output = tf.keras.layers.Dense(num_classes, activation='softmax', kernel_initializer=initializer,
                                   bias_initializer=initial_bias, name='pred', dtype='float32')(x)
    return output
preprocessed_inputs = tf.keras.layers.concatenate(encoded_features, name='concat_features', axis=-1)

# The first model will only apply preprocessing.
preprocessing_model = tf.keras.Model(all_inputs, preprocessed_inputs)

# Apply preprocessing asynchronously with tf.data.
# It is important to call prefetch and remember the AUTOTUNE options.
preprocessed_train_ds = train_ds.map(
    lambda x, y: (preprocessing_model(x), y),
    num_parallel_calls=tf.data.AUTOTUNE).prefetch(tf.data.AUTOTUNE)

# The second model is the trainable network on the preprocessed features.
outputs = forward_pass(preprocessed_inputs)
training_model = tf.keras.Model(preprocessed_inputs, outputs)
training_model.compile(optimizer=tf.keras.optimizers.Nadam(),
                       loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                       metrics=["accuracy"])

# Now the GPU can focus on the training part of the model.
training_model.fit(preprocessed_train_ds, epochs=3)
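For completeness, `all_inputs`, `encoded_features`, and `train_ds` are built earlier in the script, roughly along the lines below. This is a simplified, hypothetical sketch: my real code encodes many more columns, and the feature names here are placeholders, with `dp_as_number_y_n` standing in for one of my categorical string columns:

all_inputs = []
encoded_features = []

# Hypothetical categorical string feature; on TF 2.5 the preprocessing
# layers still live under tf.keras.layers.experimental.preprocessing.
cat_col = tf.keras.Input(shape=(1,), name='dp_as_number_y_n', dtype='string')
lookup = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=['y', 'n'])
# 4 = 2 vocab entries + OOV + mask token on this TF version (my assumption).
encode = tf.keras.layers.experimental.preprocessing.CategoryEncoding(num_tokens=4)
all_inputs.append(cat_col)
encoded_features.append(encode(lookup(cat_col)))

# Hypothetical numeric feature.
num_col = tf.keras.Input(shape=(1,), name='some_numeric_feature')
norm = tf.keras.layers.experimental.preprocessing.Normalization()
norm.adapt(np.array([[1.0], [2.0], [3.0]]))  # adapted on the real training data in my code
all_inputs.append(num_col)
encoded_features.append(norm(num_col))

# Tiny stand-in dataset with the same (features_dict, label) structure.
train_ds = tf.data.Dataset.from_tensor_slices((
    {'dp_as_number_y_n': [['y'], ['n'], ['y']],
     'some_numeric_feature': [[1.0], [2.0], [3.0]]},
    [0, 1, 2],
)).batch(2)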
Error:
ValueError: Graph disconnected: cannot obtain value for tensor KerasTensor(type_spec=TensorSpec(shape=(None, 1), dtype=tf.string, name='dp_as_number_y_n'), name='dp_as_number_y_n', description="created by layer 'dp_as_number_y_n'") at layer "sequential". The following previous layers were accessed without issue: []
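From the error it looks like building `training_model` from the intermediate `preprocessed_inputs` tensor still traces back to the raw string input. My suspicion (untested sketch below; `preprocessed` is just a name I made up) is that the training model needs its own fresh `Input` over the preprocessed features rather than reusing the concat tensor, but I am not sure whether that is the intended pattern from the post:

# Untested idea: give the training model its own Input with the width
# of the preprocessed features, instead of the intermediate tensor.
preprocessed_dim = preprocessing_model.output_shape[-1]
train_inputs = tf.keras.Input(shape=(preprocessed_dim,), name='preprocessed')
outputs = forward_pass(train_inputs)
training_model = tf.keras.Model(train_inputs, outputs)

Is that the right way to wire this up, or am I misreading the post?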