#python #keras #autoencoder #generative-adversarial-network
Question:
I am trying to train an adversarial autoencoder using the `fit` method of `keras.Model`, but for some reason it does not work.
Keep in mind that I have tried updating the encoder and the decoder at the same time, and I have tried passing the discriminator loss to the encoder both with and without the reconstruction loss.
The reconstruction loss stays the same, while the encoder's discriminator loss keeps increasing as the discriminator's own loss keeps decreasing.
```python
# Imports implied by the code below
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

discriminator = keras.Sequential(
    [
        keras.Input(shape=(4, 4, 128)),
        layers.Flatten(),
        layers.Dense(128, activation="relu"),
        layers.Dense(128, activation="relu"),
        layers.Dense(128, activation="relu"),
        layers.Dense(1, activation="sigmoid"),
    ],
    name="discriminator",
)
discriminator.summary()

encoder = keras.Sequential(
    [
        keras.Input(shape=(28, 28, 1)),
        layers.Conv2D(24, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2D(48, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2D(96, 3, activation="relu", strides=2, padding="same"),
        layers.Flatten(),
        layers.Dense(4 * 4 * 128, activation="linear"),
        layers.Reshape((4, 4, 128)),
    ],
    name="encoder",
)
encoder.summary()

decoder = keras.Sequential(
    [
        keras.Input(shape=(4, 4, 128)),
        layers.Flatten(),
        layers.Dense(7 * 7 * 64, activation="relu"),
        layers.Reshape((7, 7, 64)),
        layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same"),
        layers.Conv2DTranspose(1, 3, activation="sigmoid", strides=1, padding="same"),
    ],
    name="decoder",
)
```
I am not sure whether the problem lies in the model itself. I am using the MNIST dataset for this.
```python
class AAE(keras.Model):
    def __init__(self, encoder, decoder, discriminator):
        super(AAE, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.discriminator = discriminator
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.reconstruction_loss_tracker = keras.metrics.Mean(name="reconstruction_loss")
        self.disc_tracker = keras.metrics.Mean(name="disc_loss")
        self.discEnc_tracker = keras.metrics.Mean(name="discEnc_loss")

    @property
    def metrics(self):
        return [
            self.total_loss_tracker,
            self.reconstruction_loss_tracker,
            self.disc_tracker,
            self.discEnc_tracker,
        ]

    def compile(self, di_optimizer, e_optimizer, de_optimizer, loss_fn):
        super(AAE, self).compile()
        self.dis_optimizer = di_optimizer
        self.e_optimizer = e_optimizer
        self.de_optimizer = de_optimizer
        self.lossBCE = loss_fn[0]
        self.lossMAE = loss_fn[1]

    def train_step(self, data):
        latent = self.encoder(data)
        batch_size = 200
        dists = tf.random.normal((batch_size, 4, 4, 128))
        y_real = tf.ones((batch_size, 1))
        y_fake = tf.zeros((batch_size, 1))
        real_dist_mix = tf.concat((dists, latent), axis=0)
        y_real_fake_mix = tf.concat((y_real, y_fake), axis=0)

        # Discriminator step: prior samples labeled real, encoder outputs labeled fake
        with tf.GradientTape() as tape:
            predictions = self.discriminator(real_dist_mix)
            d_loss = self.lossBCE(y_real_fake_mix, predictions)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.dis_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))

        # Reconstruction step: update encoder and decoder on the MAE loss
        with tf.GradientTape() as Etape, tf.GradientTape() as Dtape:
            latent = self.encoder(data)
            reconstruction = self.decoder(latent)
            reconstruction_loss = self.lossMAE(data, reconstruction)
            total_loss = reconstruction_loss
        Egrads = Etape.gradient(total_loss, self.encoder.trainable_weights)
        self.e_optimizer.apply_gradients(zip(Egrads, self.encoder.trainable_weights))
        Dgrads = Dtape.gradient(total_loss, self.decoder.trainable_weights)
        self.de_optimizer.apply_gradients(zip(Dgrads, self.decoder.trainable_weights))

        # Adversarial step: update the encoder against the discriminator
        with tf.GradientTape() as tape:
            latent = self.encoder(data)
            predictions = self.discriminator(latent)
            e_loss = self.lossBCE(y_fake, predictions)
        grads = tape.gradient(e_loss, self.encoder.trainable_weights)
        self.e_optimizer.apply_gradients(zip(grads, self.encoder.trainable_weights))

        self.total_loss_tracker.update_state(total_loss)
        self.reconstruction_loss_tracker.update_state(reconstruction_loss)
        self.disc_tracker.update_state(d_loss)
        self.discEnc_tracker.update_state(e_loss)
        return {
            "loss": self.total_loss_tracker.result(),
            "reconstruction_loss": self.reconstruction_loss_tracker.result(),
            "disc_loss": self.disc_tracker.result(),
            "discEnc_loss": self.discEnc_tracker.result(),
        }
```
```python
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
mnist_digits = np.concatenate([x_train, x_test], axis=0)
mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255

Aae = AAE(encoder, decoder, discriminator)
# vae.compile(optimizer=keras.optimizers.Adam())
Aae.compile(
    di_optimizer=keras.optimizers.Adam(learning_rate=0.00001),
    e_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    de_optimizer=keras.optimizers.Adam(learning_rate=0.0001),
    loss_fn=[tf.keras.losses.BinaryCrossentropy(), tf.keras.losses.MeanAbsoluteError()],
)
h = Aae.fit(mnist_digits, epochs=15, batch_size=200)
```
Answer #1:
I think the mistake is here:
```python
with tf.GradientTape() as tape:
    latent = self.encoder(data)
    predictions = self.discriminator(latent)
    e_loss = self.lossBCE(y_fake, predictions)
grads = tape.gradient(e_loss, self.encoder.trainable_weights)
self.e_optimizer.apply_gradients(zip(grads, self.encoder.trainable_weights))
```
I would use `e_loss = self.lossBCE(y_real, predictions)` instead, because the encoder is trying to fool the discriminator: it should be rewarded when the discriminator classifies its latent codes as samples from the real prior, not as fakes.
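For reference, a minimal sketch of the adversarial step with that one change applied (same variable names as in the question's `train_step`; only the target labels differ):

```python
# Adversarial step with the suggested fix: train the encoder so the
# discriminator classifies its latent codes as coming from the prior.
with tf.GradientTape() as tape:
    latent = self.encoder(data)
    predictions = self.discriminator(latent)
    # y_real (ones) instead of y_fake (zeros): the encoder's objective is to
    # push the discriminator's output toward "real" for generated codes.
    e_loss = self.lossBCE(y_real, predictions)
grads = tape.gradient(e_loss, self.encoder.trainable_weights)
self.e_optimizer.apply_gradients(zip(grads, self.encoder.trainable_weights))
```

With `y_fake` as the target, the encoder and the discriminator minimize the same objective, so the encoder is never pushed to match the prior, which is consistent with the behavior described in the question (the discriminator loss falling while the encoder's adversarial loss keeps rising).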