I have the following model defined (below) and I'm using fit to train it. During training, I would like to print out the dataset, or at least its shape/dimensions, for troubleshooting purposes. Is this possible?
from tensorflow import keras  # assuming tf.keras; adjust if using standalone keras

model = keras.models.Sequential([
    keras.layers.Conv2D(64, 7, activation="relu", padding="same", input_shape=[28, 28, 1]),
    keras.layers.MaxPooling2D(2),
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(64, activation="relu"),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"])
history=model.fit(X_train, y_train, epochs=CONFIG_EPOCHS, batch_size=CONFIG_BATCH_SIZE, validation_data=(X_valid, y_valid))
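Of course, outside of fit I can print the shape of the training data directly, but that only shows the input shape, not the shapes as the data flows between layers during training:

print("X_train:", X_train.shape)   # e.g. (num_samples, 28, 28, 1), matching input_shape above
print("y_train:", y_train.shape)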
I am doing exactly the same thing in PyTorch, and this is how PyTorch conveniently allows printing x between the layers:
from torch.nn import Module, Conv2d, ReLU, MaxPool2d

class MLP(Module):
    # define model elements
    def __init__(self):
        super(MLP, self).__init__()
        self.conv1 = Conv2d(1, 64, 7, padding="same")
        self.act1 = ReLU()
        self.maxpool1 = MaxPool2d(2)
        ...
        ...

    def forward(self, X):
        if DEBUG:
            print("forward entered: X: ", X.size())
        printdbg("X: " + str(X.size()))
        X = self.conv1(X)
        X = self.act1(X)
        printdbg("X, conv1/act1: " + str(X.size()))
        X = self.maxpool1(X)
        printdbg("X, maxpool1: " + str(X.size()))
        X = self.conv2a(X)
        X = self.act2a(X)
        printdbg("X, conv2a/act2a: " + str(X.size()))
        X = self.conv2b(X)
        X = self.act2b(X)
        ...

def printdbg(msg):
    if DEBUG_PRT:
        print(msg)
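Is there an equivalent way to do this in Keras? The closest thing I can think of is a pass-through layer that prints the shape of whatever flows through it, roughly like the sketch below (assuming tf.keras, and that tf.print can be called from a custom layer's call), but I am not sure this is the right approach:

import tensorflow as tf
from tensorflow import keras

class ShapeLogger(keras.layers.Layer):
    # pass-through layer: prints the runtime shape of its input and returns it unchanged
    def __init__(self, tag="", **kwargs):
        super().__init__(**kwargs)
        self.tag = tag

    def call(self, inputs):
        tf.print(self.tag, "shape:", tf.shape(inputs))
        return inputs

# dropped between the existing layers, e.g.:
# keras.layers.Conv2D(64, 7, activation="relu", padding="same", input_shape=[28, 28, 1]),
# ShapeLogger("conv1"),
# keras.layers.MaxPooling2D(2),
# ShapeLogger("maxpool1"),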