diff --git a/datascienceintro/solutions/Solution_PyTorch_MNIST.ipynb b/datascienceintro/solutions/Solution_PyTorch_MNIST.ipynb
index 79e3bcf5c6027e36e0e250f396dec65d58e5f259..8419a6a6df37c57786c2e8d7e58e5c486a9ec7e6 100644
--- a/datascienceintro/solutions/Solution_PyTorch_MNIST.ipynb
+++ b/datascienceintro/solutions/Solution_PyTorch_MNIST.ipynb
@@ -294,6 +294,8 @@
         "      # #output = Floor (  (#input - #filter)/stride +1)\n",
         "      #\n",
         "      # here: 1* 64 * ( (26-2)/2) * ( (26-2)/2) = 9216\n",
+        "      # ( after two convolutional layers, with 64 filters in the second, and the down-sizing with the CNN layers and pooling, we\n",
+        "      #   to from a 28x28 pixel image (one colour channel) to a size of 12x12, for 64 filters )\n",
         "      self.fc1 = nn.Linear(9216, 128)\n",
         "      self.fc2 = nn.Linear(128, 10)\n",
         "      self.dropout1 = nn.Dropout2d(0.25)\n",