From 14e66a9964fe2fb0ce0f4d9c842855a2887e8873 Mon Sep 17 00:00:00 2001
From: Larissa Heinrich
Date: Mon, 26 Aug 2024 19:23:36 +0000
Subject: [PATCH] change some text and html stuff

---
 solution.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/solution.py b/solution.py
index c85b685..2d5ed2d 100644
--- a/solution.py
+++ b/solution.py
@@ -395,8 +395,8 @@ def visualize_color_attribution(attribution, original_image):
 #
 # We will not be using the random latent code (green, in the figure), so the model we use is made up of three networks:
 # - The generator - this will be the bulk of the model, and will be responsible for transforming the images: we're going to use a `UNet`
-# - The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel`
 # - The style encoder - this will be responsible for encoding the style of the image: we're going to use a `DenseModel`
+# - The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel`
 #
 # Let's start by creating these!
 # %%
@@ -579,10 +579,10 @@ def copy_parameters(source_model, target_model):
 # You were given several different options in the training code below. In each case, one of the options will work, and the other will not.
 # Comment out the option that you think will not work.
 #
 # Let's train the StarGAN one batch at a time.
 # While you watch the model train, consider whether you think it will be successful at generating counterfactuals in the number of steps we give it. What is the minimum number of iterations you think are needed for this to work, and how much time do you think it will take?
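
For readers who want a concrete picture of the three networks listed in the first hunk (generator, style encoder, discriminator), here is a minimal PyTorch sketch. The exercise's real `UNet` and `DenseModel` classes are not shown in this patch, so the classes below, their constructor arguments, the sizes, and the style-conditioning scheme are illustrative assumptions rather than the course's actual implementation.

import torch
from torch import nn


class DenseModel(nn.Module):
    """Small fully-connected network; stand-in for the exercise's DenseModel.

    Used here for both the style encoder and the discriminator, which differ
    only in their number of outputs."""

    def __init__(self, input_shape, num_outputs):
        super().__init__()
        in_features = int(torch.prod(torch.tensor(input_shape)))
        self.layers = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features, 128),
            nn.ReLU(),
            nn.Linear(128, num_outputs),
        )

    def forward(self, x):
        return self.layers(x)


class Generator(nn.Module):
    """Stand-in for the UNet-based generator: an image-to-image network
    conditioned on a style vector (here by concatenating the style as extra
    input channels, one common conditioning choice)."""

    def __init__(self, in_channels=3, style_size=8):
        super().__init__()
        self.style_size = style_size
        self.net = nn.Sequential(
            nn.Conv2d(in_channels + style_size, 16, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, in_channels, kernel_size=3, padding=1),
            nn.Sigmoid(),
        )

    def forward(self, x, style):
        b, _, h, w = x.shape
        # Broadcast the style code over the spatial dimensions and concatenate.
        style_map = style.view(b, self.style_size, 1, 1).expand(-1, -1, h, w)
        return self.net(torch.cat([x, style_map], dim=1))


# Instantiate the three networks with assumed sizes.
style_size, num_classes, image_shape = 8, 4, (3, 28, 28)
generator = Generator(in_channels=image_shape[0], style_size=style_size)
style_encoder = DenseModel(input_shape=image_shape, num_outputs=style_size)
discriminator = DenseModel(input_shape=image_shape, num_outputs=num_classes)

x = torch.rand(2, *image_shape)
style = style_encoder(x)       # (2, 8) style codes
fake = generator(x, style)     # (2, 3, 28, 28) candidate counterfactuals
logits = discriminator(fake)   # (2, 4) class logits

The concatenation-based conditioning above is only one way to inject the style code into the generator; the exercise's UNet may condition on style differently.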