From c1a6e28b5ebd13e9169f02bfdd6106391107efc2 Mon Sep 17 00:00:00 2001
From: Diane Adjavon
Date: Thu, 15 Aug 2024 10:47:03 -0400
Subject: [PATCH] Fix numbering, missing todos, and plotting bug

---
 solution.py | 36 +++++++++++++++---------------------
 1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/solution.py b/solution.py
index 1e3bc59..318c44e 100644
--- a/solution.py
+++ b/solution.py
@@ -469,7 +469,7 @@ def forward(self, x, y):
 # We will have two different optimizers, one for the Generator and one for the Discriminator.
 #
 # %%
-optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=1e-4)
+optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=1e-5)
 optimizer_g = torch.optim.Adam(generator.parameters(), lr=1e-4)
 # %% [markdown] tags=[]
 #
@@ -545,7 +545,7 @@ def copy_parameters(source_model, target_model):
 generator_ema = generator_ema.to(device)
 
 # %% [markdown] tags=[]
-# Task 3.2: Training!
+# Task 3.3: Training!
 # You were given several different options in the training code below. In each case, one of the options will work, and the other will not.
 # Comment out the option that you think will not work.
 #
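The only change in the hunk at line 469 is the discriminator's learning rate (1e-4 down to 1e-5); giving the discriminator a smaller step size than the generator is a common way to keep it from overpowering the generator early in training. As a sketch, the optimizer cell after this patch reads as below, with throwaway stand-ins for the notebook's real networks so the snippet runs on its own:

    import torch
    from torch import nn

    # Stand-ins for the notebook's models; in solution.py these are the real networks.
    discriminator = nn.Linear(1, 1)
    generator = nn.Linear(1, 1)

    # The discriminator now takes smaller steps than the generator (1e-5 vs 1e-4).
    optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=1e-5)
    optimizer_g = torch.optim.Adam(generator.parameters(), lr=1e-4)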
@@ -760,7 +760,7 @@ def copy_parameters(source_model, target_model):
 #
 
 # %% [markdown] tags=[]
-# # Part 4: Evaluating the GAN
+# # Part 4: Evaluating the GAN and creating Counterfactuals
 
 # %% [markdown] tags=[]
 # ## Creating counterfactuals
@@ -777,7 +777,7 @@ def copy_parameters(source_model, target_model):
 
 for i in range(4):
-    options = np.where(test_mnist.targets == i)[0]
+    options = np.where(test_mnist.conditions == i)[0]
     # Note that you can change the image index if you want to use a different prototype.
     image_index = 0
     x, y = test_mnist[options[image_index]]
 
@@ -795,7 +795,7 @@ def copy_parameters(source_model, target_model):
 # %% [markdown]
 # Now we need to use these prototypes to create counterfactual images!
 # %% [markdown]
-# Task 4.1: Create counterfactuals
+# Task 4: Create counterfactuals
 # In the below, we will store the counterfactual images in the `counterfactuals` array.
 #
 #
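The targets-to-conditions rename above suggests the colored-MNIST test set exposes its color-class label as a `conditions` attribute rather than torchvision's usual `targets`. Under that assumption, the prototype-selection loop patched above amounts to the sketch below; the `prototypes` list and the `_FakeColoredMNIST` stand-in are illustrative only, and it is these prototype images that the following cells feed to the trained generator to fill the `counterfactuals` array.

    import numpy as np

    class _FakeColoredMNIST:
        """Tiny stand-in for the notebook's test_mnist, just to make the sketch runnable."""
        def __init__(self, n=40):
            rng = np.random.default_rng(0)
            self.conditions = rng.integers(0, 4, size=n)            # color class per image
            self.images = rng.random((n, 3, 28, 28)).astype("float32")
        def __getitem__(self, idx):
            return self.images[idx], self.conditions[idx]

    test_mnist = _FakeColoredMNIST()

    prototypes = []  # one example image per class (illustrative container)
    for i in range(4):
        options = np.where(test_mnist.conditions == i)[0]  # all test images of class i
        image_index = 0  # change this to pick a different prototype
        x, y = test_mnist[options[image_index]]
        prototypes.append(x)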
@@ -887,9 +887,6 @@ def copy_parameters(source_model, target_model):
 #
 #
 
-# %% [markdown]
-# # Part 5: Highlighting Class-Relevant Differences
-
 # %% [markdown]
 # At this point we have:
 # - A classifier that can differentiate between image of different classes
@@ -954,7 +951,7 @@ def visualize_color_attribution_and_counterfactual(
 # - Used the counterfactual images to highlight the differences between classes
 #
 # %% [markdown]
-# # Part 6: Exploring the Style Space, finding the answer
+# # Part 5: Exploring the Style Space, finding the answer
 # By now you will have hopefully noticed that it isn't the exact color of the image that determines its class, but that two images with a very similar color can be of different classes!
 #
 # Here is an example of two images that are very similar in color, but are of different classes.
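The hunks that follow color a `styles_pca` scatter plot. The patch itself never shows how that 2-D embedding is computed; a minimal sketch, assuming `styles` holds one style vector per test image (for example, from the generator's style encoder), would be:

    import numpy as np
    from sklearn.decomposition import PCA

    # Stand-in style vectors, one per image; the notebook gathers these from the generator.
    styles = np.random.default_rng(0).random((100, 64))

    # 2-D coordinates used for the scatter plots below.
    styles_pca = PCA(n_components=2).fit_transform(styles)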

@@ -1002,15 +999,17 @@ def visualize_color_attribution_and_counterfactual(
 plt.show()
 
 # %% [markdown]
-# Task 6.2: Adding color to the style space
+# Task 5.1: Adding color to the style space
 # We know that color is important. Does interpreting the style space as colors help us understand better?
 #
 # Let's use the style space to color the PCA plot.
 # (Note: there is no code to write here, just run the cell and answer the questions below)
 #
-# TODO WIP HERE
 # %%
-normalized_styles = (styles - np.min(styles, axis=1)) / styles.ptp(axis=1)
+styles = np.array(styles)
+normalized_styles = (styles - np.min(styles, axis=1, keepdims=True)) / np.ptp(
+    styles, axis=1, keepdims=True
+)
 
 # Plot the PCA again!
 plt.figure(figsize=(10, 10))
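The replacement above appears to address two problems at once: without `keepdims=True` the per-image minimum and range come back as 1-D arrays that broadcast against the wrong axis of `styles`, and the `ndarray.ptp` method was removed in NumPy 2.x, so the free function `np.ptp` is used instead. A minimal sketch of the per-row min-max normalization this implements, with stand-in style vectors so it runs on its own:

    import numpy as np

    # Stand-in: a list of per-image style vectors, as gathered by the notebook.
    styles = [np.random.default_rng(i).random(64) for i in range(100)]

    styles = np.array(styles)  # shape (n_images, style_dim)
    # keepdims=True keeps the per-image min/range as (n_images, 1) columns, so the
    # subtraction and division broadcast across the style dimension, not across images.
    mins = np.min(styles, axis=1, keepdims=True)
    ranges = np.ptp(styles, axis=1, keepdims=True)  # np.ptp(): ndarray.ptp is gone in NumPy 2.x
    normalized_styles = (styles - mins) / ranges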
@@ -1027,27 +1026,22 @@ def visualize_color_attribution_and_counterfactual(
 #
 # - Can you see any patterns in the colors? Is the space smooth, for example?
 #
 #
 # %% [markdown]
-# Using the images to color the style space
+# Task 5.2: Using the images to color the style space
 # Finally, let's just use the colors from the images themselves!
-# All of the non-zero values in the image can be averaged to get a color.
+# The maximum value in the image (since they are "black-and-color") can be used as a color!
 #
 # Let's get that color, then plot the style space again.
 # (Note: once again, no coding needed here, just run the cell and think about the results with the questions below)
 #
 # %% tags=["solution"]
-tol = 1e-6
-
-colors = []
-for x, y in random_test_mnist:
-    non_zero = x[x > tol]
-    colors.append(non_zero.mean(dim=(1, 2)).cpu().numpy().squeeze())
+colors = [np.max(x.numpy(), axis=(1, 2)) for x, _ in random_test_mnist]
 # Plot the PCA again!
 plt.figure(figsize=(10, 10))
 plt.scatter(
     styles_pca[:, 0],
     styles_pca[:, 1],
-    c=normalized_styles,
+    c=colors,
 )
 plt.show()
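The new one-liner relies on the images being "black-and-color": every non-background pixel carries the same hue, so the per-channel maximum over the spatial dimensions recovers that hue directly, and the result can be passed straight to `plt.scatter` as an RGB value. A small self-contained illustration (the tiny array below is made up purely for demonstration):

    import numpy as np

    # A tiny fake "black-and-color" image: 3 channels, 2x2 pixels,
    # with an orange-ish digit pixel on an otherwise black background.
    image = np.zeros((3, 2, 2), dtype=np.float32)
    image[:, 0, 1] = [1.0, 0.6, 0.0]    # the brightest digit pixel carries the pure color
    image[:, 1, 0] = [0.5, 0.3, 0.0]    # a dimmer pixel of the same hue

    color = np.max(image, axis=(1, 2))  # per-channel max over height and width
    print(color)                        # -> [1.  0.6 0. ], usable directly as an RGB value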