Hello all,
I need help, please. I have this GAN architecture:
# Imports (assuming TensorFlow 2.x / tf.keras)
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Dense, Dropout, Flatten, LeakyReLU, Reshape
from tensorflow.keras.optimizers import Adam

# Discriminator: classifies (64, 256, 4) images as real or fake
def define_D(in_shape=(64, 256, 4)):
    model = Sequential()
    model.add(Conv2D(128, (3, 3), padding="same", input_shape=in_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(128, (3, 3), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Flatten())
    model.add(Dropout(0.4))
    model.add(Dense(1, activation="sigmoid"))
    opt = Adam(learning_rate=0.0003, beta_1=0.5)
    model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])
    return model

model_D = define_D()
model_D.summary()
# Generator: maps a latent vector to a (64, 256, 4) image
def define_G(latent_dim):
    model = Sequential()
    n_nodes = 256 * 64
    model.add(Dense(n_nodes, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((4, 16, 256)))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2DTranspose(128, (4, 4), strides=(2, 2), padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Conv2D(4, (7, 7), activation="tanh", padding="same"))
    model.compile(loss="mean_absolute_error", optimizer="adam", metrics=["mean_absolute_error"])
    return model

model_G = define_G(100)
model_G.summary()
# Combined GAN: generator followed by the (frozen) discriminator
def define_GAN(model_G, model_D):
    # freeze the discriminator's weights while the combined model trains the generator
    model_D.trainable = False
    model = Sequential()
    model.add(model_G)
    model.add(model_D)
    opt = Adam(learning_rate=0.00015, beta_1=0.5)
    model.compile(loss="binary_crossentropy", optimizer=opt)
    return model
# Sample latent vectors from a standard normal distribution
def generate_latent_points(latent_dim, n_samples):
    X = np.random.randn(latent_dim * n_samples)
    X = X.reshape(n_samples, latent_dim)
    return X

# Generate fake images from noise; label them 0
def generate_fake_images(model_G, latent_dim, n_samples):
    x_input = generate_latent_points(latent_dim, n_samples)
    x = model_G.predict(x_input)
    y = np.zeros((n_samples, 1))
    return x, y

# Draw a random batch of real images from the dataset; label them 1
def generate_real_images(dataset, n_samples):
    i = np.random.randint(0, dataset.shape[0], n_samples)
    X = dataset[i]
    y = np.ones((n_samples, 1))
    return X, y
def train_GAN(model_G, model_D, model_GAN, dataset, latent_dim, n_epochs=1, n_batch=128):
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    # fixed latent point used to track the generator's progress over time
    l_P = generate_latent_points(100, 1)
    # manually enumerate epochs
    for i in range(n_epochs):
        # enumerate batches over the training set
        for j in range(bat_per_epo):
            # train the discriminator on a half batch of real and a half batch of fake images
            X_real, y_real = generate_real_images(dataset, half_batch)
            X_fake, y_fake = generate_fake_images(model_G, latent_dim, half_batch)
            X, y = np.vstack((X_real, X_fake)), np.vstack((y_real, y_fake))
            d_loss, _ = model_D.train_on_batch(X, y)
            # train the generator via the combined model (fake images labelled as real)
            X_gan = generate_latent_points(latent_dim, n_batch)
            y_gan = np.ones((n_batch, 1))
            g_loss = model_GAN.train_on_batch(X_gan, y_gan)
            print('%d, %d/%d, d=%.3f, g=%.3f' % (i + 1, j + 1, bat_per_epo, d_loss, g_loss))
        # end of epoch: snapshot the generator's output and the current models
        X = model_G.predict(l_P)
        #plt.imshow(X[0,:,:,:])
        #plt.show(block=False)
        #plt.pause(1)
        #plt.close()
        Liste.append(X[0, :, :, :])
        np.save('my_list2.npy', Liste)
        ModelG.append(model_G)
        ModelD.append(model_D)
# Global lists that train_GAN fills with per-epoch snapshots
Liste, ModelG, ModelD = [], [], []

latent_dim = 100
model_GAN = define_GAN(model_G, model_D)
train_GAN(model_G, model_D, model_GAN, TrainingsSet, latent_dim)
model_G.save("Generator.h5")
model_D.save("Diskriminator.h5")
model_GAN.save("model_GAN.h5")
When I try to load the saved models, my train_GAN loop does not work anymore: the generator's prediction is always the same as it was before saving, and there is no learning progress. I really need help. Is it because I interrupt the loop and then save? One epoch takes so long that I have to interrupt training to get a saved checkpoint, but it doesn't work.
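For context, this is roughly how I reload the checkpoint and try to resume (a simplified sketch; the file names match the save calls above, and rebuilding model_GAN from the loaded parts is the step I am unsure about):

from tensorflow.keras.models import load_model

# reload the saved networks (sketch of my resume step)
model_G = load_model("Generator.h5")
model_D = load_model("Diskriminator.h5")

# rebuild the combined model from the loaded parts and continue training
model_GAN = define_GAN(model_G, model_D)
train_GAN(model_G, model_D, model_GAN, TrainingsSet, latent_dim)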
Please, can someone help me?
MU SO