diff --git a/ESM_01_introduction_to_pytorch.ipynb b/ESM_01_introduction_to_pytorch.ipynb
new file mode 100644
index 0000000..e96863b
--- /dev/null
+++ b/ESM_01_introduction_to_pytorch.ipynb
@@ -0,0 +1,5884 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 5,
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "id,colab,colab_type,-all",
+ "formats": "ipynb,py:percent",
+ "main_language": "python"
+ },
+ "papermill": {
+ "default_parameters": {},
+ "duration": 21.925345,
+ "end_time": "2021-09-16T12:33:06.344225",
+ "environment_variables": {},
+ "exception": null,
+ "input_path": "course_UvA-DL/01-introduction-to-pytorch/Introduction_to_PyTorch.ipynb",
+ "output_path": ".notebooks/course_UvA-DL/01-introduction-to-pytorch.ipynb",
+ "parameters": {},
+ "start_time": "2021-09-16T12:32:44.418880",
+ "version": "2.3.3"
+ },
+ "colab": {
+ "name": "ESM 01-introduction-to-pytorch.ipynb",
+ "provenance": [],
+ "include_colab_link": true
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ }
+ },
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.050411,
+ "end_time": "2021-09-16T12:32:45.750290",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:45.699879",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "eaced3d8"
+ },
+ "source": [
+ "\n",
+ "# Tutorial 1: Introduction to PyTorch\n",
+ "\n",
+ "* **Author:** Phillip Lippe\n",
+ "* **License:** CC BY-SA\n",
+ "* **Generated:** 2021-09-16T14:32:16.770882\n",
+ "\n",
+ "This tutorial will give a short introduction to PyTorch basics, and get you setup for writing your own neural networks.\n",
+ "This notebook is part of a lecture series on Deep Learning at the University of Amsterdam.\n",
+ "The full list of tutorials can be found at https://uvadlc-notebooks.rtfd.io.\n",
+ "\n",
+ "\n",
+ "---\n",
+ "Open in [![Open In Colab](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHUAAAAUCAYAAACzrHJDAAAIuUlEQVRoQ+1ZaVRURxb+qhdolmbTUVSURpZgmLhHbQVFZIlGQBEXcMvJhKiTEzfigjQg7oNEJ9GMGidnjnNMBs2czIzajksEFRE1xklCTKJiQLRFsUGkoUWw+82pamn79etGYoKek1B/4NW99/tu3e/dquJBAGD27NkHALxKf39WY39gyrOi+i3xqGtUoePJrFmznrmgtModorbTu8YRNZk5cybXTvCtwh7o6NR2KzuZMWNGh6jtVt7nA0ymT5/eJlF9POrh7PAQl6s8bGYa3PUum//htmebVtLRqW0q01M5keTk5FZFzU0oRle3+zxwg5Hgtb+PZiL/ZVohxCI+hL5JgjmfjPxZ26+33BG3dA+ealHPM4gQAo5rU59gsI8bRvl54t3Ca62mvHyUAhtOlLd5WSQpKcluBjumnoCLs1EARkVd9E8l3p9y2i7RbQ1B6pFwu/YDgW8KbHJHMTQrwnjz2oZm9M4pavOCfo5jWrgCaaMVcMs6/pNhDr0+AMN93XlxV7R6DNpyzi7W/OE+yIrsjU6rTrbKV5cd/pNyItOmTbMp6sbBB+EqaYJY4cWE3VUciNt1TpgfcRFv71Fi54xT5kSoyLvOBEJMOMxWXkFlBeBSX4u6Zkcs+3KszYRtiapbNRqF31UgetVuc8z9vBXIv1qD+F1f83B6uDlCUyfsZGepGPpmg01OB7EITQbhS9ribKy+DmP1DUiClLz4bnIHVOqa7BY+Z1wg5g3zgUvyehiNpnJKxSLc/ts76LKm0BzX3c0RNy1yXjDcB5lWoro4iNHQxM+f1kWeWQARAWQS++trISJTp061Kep25X/MycwtjuctSC5rxo7ppi7VNUox5+PhPHtrsS2O1qJ6yx1QujQUzm9sh6hbkBlvvGcN8hYnwjUjH6kjfZEd5c/jitz5Jc5U3ENnFynKl4eB7nyEgP2UZ+Yz3/rVEbyYr27qELrtC4FIC0J7sc7xWnmccdHfRRTs0VB+cA4lt+oFcRR/wUeH8FG5w2Mbx8FQ8TXEvv1xYf4wBP3O2WyL3/UVjpXWgIqaFeUPr+wTmDvUB7njH6/bOv+HRg4SqioAg5GDe1aB3ZeMTJkyRSBqkLsWqSEm0fZVBEN94zEZnYvrdx1JL5cxe+a+AbhSJecRRHW/ikTFRTa38dtQlNZ5CRKwFvUtZU/kvBoEF9Uxni/XqIM+dwKbTw3rhcxIf7gmr2M+H6SMwx8iBzJbw5oxeG3Lv5FX9B3AGaHPS8e8z77H7v9VMpvPG5ug1enh7eGK8h0LBTwUb+GInqzInlRUK65DmTPQu4c3+uQKjwKK77zwUxBX4Tq7yR1RuiwUsqlrABCM6esHdXoy47fk4+prYKy8ZF574x4V5BnHQBuf4g9Z9ld8U36L2aktZNNplNfw7zotwWTy5MkCUft4aLEopJj5/OPHl1BQqeAVOnHgNSQOqmBzq9V9cfEm/yx5ubMGKS9cYPZ3vx2OS/c6PVHUuUO7Y1Pci3BO/1zgq18byebfGemLtNF+6JRtOvMk926ibussZqM+1mNz4TWkH7rCbM5phwGRGDAaoF8fY5OHFnlldAA8sgoEXKnDukA1NgSeNjqkJT9brbN4pC9WRweYXyLugR73c+MYvyWfu0yC6+mjzN1Isfw3FKJS98CU/zI1IHFkFPR52cHL2FJk0sB6kMTERIGo9GzcPkLNfA0cwdwi/hfEYO86ZMd9w+y1egfM2T2Eh/vesMNwljSzuZRT420SW3eqy8N6aHMmwmnFUZ7/PGVPbIoNZvNU1BURdHs0bT2+HjL8sDSM2e6vi4Lj5NW8WOLVA6RTT2azxLV+bglaFNqLieqemS/gWkw7NyoAHo+2dEsiivengjKsPFoqWOvbSh/kxPaxyW/JRzH2Fl3EzD9/x
jAefJqB3usKUFn/0Gb+S/d/jy3FN2yLOmnSJJtn6oehByEiHPSeXnDxFGPRnoFoaBJjcdQlbDwcjL1zTNuQpoxD7R0OG0uUTMi0fkVwdzBdYIwcwZunxrVJVLplNm54BZp7jfDfYLoNyqQi1K6KxIdHzmN+QQ2WjFIwUT2zTGdlRXo4NFXVUO4sgX5dFC7f0aP/ZlNeUjFBuL8Xjl6uRuP6aMjSjpjzsH62FDU7JhBuGccEXIvDfJFFBc/gHw80dklfCVYnRaDfpiJcutPA4F7qJsfJeUPQI+1fqMlNhFx1FM0GDqkjFVg7NojlQ0Vt4aM5ReSqcbpaCg8nCW5lRsBvbT4T1TLfFptsfh7gItzuKTdJSEiwKSrt1vcmnEXXrsLbYnWDA1bu+z2WKy9Arq+1KRqdfKsoBo0GcdtEpS/B1bO4v0cFiUhkjskvKcMrWwtAPHuwQq8Z+4LZ1vTQANfXt4J0DwZX9gWa9qh4XDM/voC9JXfwYEMMHJcfNtusn82ihvliVUwg5KrPGVf6GH94ZJpEZBen6EC4qYTHA1dXhW0JIex8txzv//c8lhzXIi/BFxOH9jGbQhZsRalTIBZZ8KkGyZAxeRQvXkFF1TWz/Hm46jNYUnjPbt3JxIkT7f6dSj8qfJJyVvBxgaIlblOyjtysNHWN9fjjqWi7glJfW3/S0Hlj2XnA8PhKT9w6g3Qx3XiXhvuxQsuT1proxBKI/AaZqY1Xz5muvY8G8XkRRCaHsfQsRAFDH/tZPbcYuHotOG0FRIqB4HR3wNVoIPLtz8ycTguu+jpEigE218vd1YCr5m+HpHMvEI9u4LTXwNWaLjl0iPwGAmIpeHx1VeCqTJdPs1/vweweQPO3HC24NhOhnTphwoQnfv6QSY2ICbkNmdSA4h87oaLaiYfn5diIEd4att2erOwJXbPUHp953p6orQVSUVWRAXBT8c/dJ5L9xhzaJGp71GR/wFP8P5V2z10NSC9T93QM2xUg8fHxT+zU9ijeU4naHon8CjFJXFzc8/kn+dN06q9QgF98SYSo2Xen2NjYZy5sR6f+4nLSK5Iam2PH/x87a1YN/t5sBgAAAABJRU5ErkJggg==){height=\"20px\" width=\"117px\"}](https://colab.research.google.com/github/PytorchLightning/lightning-tutorials/blob/publication/.notebooks/course_UvA-DL/01-introduction-to-pytorch.ipynb)\n",
+ "\n",
+ "Give us a ⭐ [on Github](https://www.github.com/PytorchLightning/pytorch-lightning/)\n",
+ "| Check out [the documentation](https://pytorch-lightning.readthedocs.io/en/latest/)\n",
+ "| Join us [on Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-pw5v393p-qRaDgEk24~EjiZNBpSQFgQ)"
+ ],
+ "id": "eaced3d8"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.042559,
+ "end_time": "2021-09-16T12:32:45.835806",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:45.793247",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "fa480fc6"
+ },
+ "source": [
+ "## Setup\n",
+ "This notebook requires some packages besides pytorch-lightning."
+ ],
+ "id": "fa480fc6"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.043036,
+ "end_time": "2021-09-16T12:32:46.013859",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:45.970823",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "9473f942"
+ },
+ "source": [
+ "\n",
+ "Welcome to our PyTorch tutorial for the Deep Learning course 2020 at the University of Amsterdam!\n",
+ "The following notebook is meant to give a short introduction to PyTorch basics, and get you setup for writing your own neural networks.\n",
+ "PyTorch is an open source machine learning framework that allows you to write your own neural networks and optimize them efficiently.\n",
+ "However, PyTorch is not the only framework of its kind.\n",
+ "Alternatives to PyTorch include [TensorFlow](https://www.tensorflow.org/), [JAX](https://github.com/google/jax#quickstart-colab-in-the-cloud) and [Caffe](http://caffe.berkeleyvision.org/).\n",
+ "We choose to teach PyTorch at the University of Amsterdam because it is well established, has a huge developer community (originally developed by Facebook), is very flexible and especially used in research.\n",
+ "Many current papers publish their code in PyTorch, and thus it is good to be familiar with PyTorch as well.\n",
+ "Meanwhile, TensorFlow (developed by Google) is usually known for being a production-grade deep learning library.\n",
+ "Still, if you know one machine learning framework in depth, it is very easy to learn another one because many of them use the same concepts and ideas.\n",
+ "For instance, TensorFlow's version 2 was heavily inspired by the most popular features of PyTorch, making the frameworks even more similar.\n",
+ "If you are already familiar with PyTorch and have created your own neural network projects, feel free to just skim this notebook.\n",
+ "\n",
+ "We are of course not the first ones to create a PyTorch tutorial.\n",
+ "There are many great tutorials online, including the [\"60-min blitz\"](https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html) on the official [PyTorch website](https://pytorch.org/tutorials/).\n",
+ "Yet, we choose to create our own tutorial which is designed to give you the basics particularly necessary for the practicals, but still understand how PyTorch works under the hood.\n",
+ "Over the next few weeks, we will also keep exploring new PyTorch features in the series of Jupyter notebook tutorials about deep learning.\n",
+ "\n",
+ "We will use a set of standard libraries that are often used in machine learning projects.\n",
+ "If you are running this notebook on Google Colab, all libraries should be pre-installed.\n",
+ "If you are running this notebook locally, make sure you have installed our `dl2020` environment ([link](https://github.com/uvadlc/uvadlc_practicals_2020/blob/master/environment.yml)) and have activated it."
+ ],
+ "id": "9473f942"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:45.924885Z",
+ "iopub.status.busy": "2021-09-16T12:32:45.924409Z",
+ "iopub.status.idle": "2021-09-16T12:32:45.927196Z",
+ "shell.execute_reply": "2021-09-16T12:32:45.926697Z"
+ },
+ "id": "a1f58dc1",
+ "lines_to_next_cell": 0,
+ "papermill": {
+ "duration": 0.048784,
+ "end_time": "2021-09-16T12:32:45.927310",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:45.878526",
+ "status": "completed"
+ },
+ "tags": []
+ },
+ "source": [
+ "# ! pip install --quiet \"torchmetrics>=0.3\" \"matplotlib\" \"torch>=1.6, <1.9\" \"pytorch-lightning>=1.3\""
+ ],
+ "id": "a1f58dc1",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:46.106552Z",
+ "iopub.status.busy": "2021-09-16T12:32:46.106081Z",
+ "iopub.status.idle": "2021-09-16T12:32:46.889364Z",
+ "shell.execute_reply": "2021-09-16T12:32:46.889833Z"
+ },
+ "papermill": {
+ "duration": 0.833305,
+ "end_time": "2021-09-16T12:32:46.889977",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:46.056672",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "edcfda76"
+ },
+ "source": [
+ "import time\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import torch\n",
+ "import torch.nn as nn\n",
+ "import torch.utils.data as data\n",
+ "\n",
+ "# %matplotlib inline\n",
+ "from IPython.display import set_matplotlib_formats\n",
+ "from matplotlib.colors import to_rgba\n",
+ "from tqdm.notebook import tqdm # Progress bar\n",
+ "\n",
+ "#EM HAD TO UPDATE and import the following 2 new lines to handle the deprecated \"set_matplotlib_formats\" (from IPython.display) by importing the new package (line 16) and submodule (line 17), shown immediately below, and THEN calling it properly on the next line \n",
+ "\n",
+ "import matplotlib_inline\n",
+ "import matplotlib_inline.backend_inline\n",
+ "\n",
+ "matplotlib_inline.backend_inline.set_matplotlib_formats(\"svg\",\"pdf\")\n",
+ "#EM DEPRECATED: set_matplotlib_formats(\"svg\", \"pdf\")"
+ ],
+ "id": "edcfda76",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.042985,
+ "end_time": "2021-09-16T12:32:46.977014",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:46.934029",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "d9625e8f"
+ },
+ "source": [
+ "## The Basics of PyTorch\n",
+ "\n",
+ "We will start with reviewing the very basic concepts of PyTorch.\n",
+ "As a prerequisite, we recommend to be familiar with the `numpy` package as most machine learning frameworks are based on very similar concepts.\n",
+ "If you are not familiar with numpy yet, don't worry: here is a [tutorial](https://numpy.org/devdocs/user/quickstart.html) to go through.\n",
+ "\n",
+ "So, let's start with importing PyTorch.\n",
+ "The package is called `torch`, based on its original framework [Torch](http://torch.ch/).\n",
+ "As a first step, we can check its version:"
+ ],
+ "id": "d9625e8f"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:47.065771Z",
+ "iopub.status.busy": "2021-09-16T12:32:47.065287Z",
+ "iopub.status.idle": "2021-09-16T12:32:47.067941Z",
+ "shell.execute_reply": "2021-09-16T12:32:47.067546Z"
+ },
+ "papermill": {
+ "duration": 0.048411,
+ "end_time": "2021-09-16T12:32:47.068040",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.019629",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "eb6179df",
+ "outputId": "b5feae1c-ec97-4692-81f8-2421ea095835"
+ },
+ "source": [
+ "print(\"Using torch\", torch.__version__)"
+ ],
+ "id": "eb6179df",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Using torch 1.8.1+cu102\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.04343,
+ "end_time": "2021-09-16T12:32:47.154839",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.111409",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "8a8171cd"
+ },
+ "source": [
+ "At the time of writing this tutorial (the middle of August 2021), the current stable version is 1.9.\n",
+ "You should therefore see the output `Using torch 1.9.0`, eventually with some extension for the CUDA version on Colab.\n",
+ "In case you use the `dl2020` environment, you should see `Using torch 1.6.0` since the environment was provided in October 2020.\n",
+ "It is recommended to update the PyTorch version to the newest one.\n",
+ "If you see a lower version number than 1.6, make sure you have installed the correct environment, or ask one of your TAs.\n",
+ "In case PyTorch 1.10 or newer will be published during the time of the course, don't worry.\n",
+ "The interface between PyTorch versions doesn't change too much, and hence all code should also be runnable with newer versions.\n",
+ "\n",
+ "As in every machine learning framework, PyTorch provides functions that are stochastic like generating random numbers.\n",
+ "However, a very good practice is to setup your code to be reproducible with the exact same random numbers.\n",
+ "This is why we set a seed below."
+ ],
+ "id": "8a8171cd"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:47.245406Z",
+ "iopub.status.busy": "2021-09-16T12:32:47.244938Z",
+ "iopub.status.idle": "2021-09-16T12:32:47.249667Z",
+ "shell.execute_reply": "2021-09-16T12:32:47.250066Z"
+ },
+ "papermill": {
+ "duration": 0.050609,
+ "end_time": "2021-09-16T12:32:47.250178",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.199569",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "5d3e8fcb",
+ "outputId": "207fbd40-2db7-4673-9090-9531f80e1042"
+ },
+ "source": [
+ "torch.manual_seed(42) # Setting the seed"
+ ],
+ "id": "5d3e8fcb",
+ "execution_count": null,
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.043829,
+ "end_time": "2021-09-16T12:32:47.337473",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.293644",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "6f28366d"
+ },
+ "source": [
+ "### Tensors\n",
+ "\n",
+ "Tensors are the PyTorch equivalent to Numpy arrays, with the addition to also have support for GPU acceleration (more on that later).\n",
+ "The name \"tensor\" is a generalization of concepts you already know.\n",
+ "For instance, a vector is a 1-D tensor, and a matrix a 2-D tensor.\n",
+ "When working with neural networks, we will use tensors of various shapes and number of dimensions.\n",
+ "\n",
+ "Most common functions you know from numpy can be used on tensors as well.\n",
+ "Actually, since numpy arrays are so similar to tensors, we can convert most tensors to numpy arrays (and back) but we don't need it too often.\n",
+ "\n",
+ "#### Initialization\n",
+ "\n",
+ "Let's first start by looking at different ways of creating a tensor.\n",
+ "There are many possible options, the most simple one is to call\n",
+ "`torch.Tensor` passing the desired shape as input argument:"
+ ],
+ "id": "6f28366d"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:47.428074Z",
+ "iopub.status.busy": "2021-09-16T12:32:47.427607Z",
+ "iopub.status.idle": "2021-09-16T12:32:47.431411Z",
+ "shell.execute_reply": "2021-09-16T12:32:47.430883Z"
+ },
+ "papermill": {
+ "duration": 0.050551,
+ "end_time": "2021-09-16T12:32:47.431515",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.380964",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "7d6bab63",
+ "outputId": "c758273b-e053-4d33-b4f1-8972fe8cd99f"
+ },
+ "source": [
+ "x = torch.Tensor(2, 3, 4)\n",
+ "print(x)"
+ ],
+ "id": "7d6bab63",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([[[7.3697e+28, 2.7869e+29, 4.3059e+21, 6.9768e+22],\n",
+ " [6.8612e+22, 4.6114e+24, 3.0186e+32, 4.5434e+30],\n",
+ " [1.9519e-19, 7.4934e+28, 8.9068e-15, 5.6284e-14]],\n",
+ "\n",
+ " [[2.0618e-19, 1.0901e+27, 2.0532e-19, 1.7440e+28],\n",
+ " [1.2997e+34, 6.8608e+22, 4.7473e+27, 2.0532e-19],\n",
+ " [3.1771e+30, 7.2442e+22, 1.6931e+22, 1.1022e+24]]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.043882,
+ "end_time": "2021-09-16T12:32:47.519551",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.475669",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "2b19df67"
+ },
+ "source": [
+ "The function `torch.Tensor` allocates memory for the desired tensor, but reuses any values that have already been in the memory.\n",
+ "To directly assign values to the tensor during initialization, there are many alternatives including:\n",
+ "\n",
+ "* `torch.zeros`: Creates a tensor filled with zeros\n",
+ "* `torch.ones`: Creates a tensor filled with ones\n",
+ "* `torch.rand`: Creates a tensor with random values uniformly sampled between 0 and 1\n",
+ "* `torch.randn`: Creates a tensor with random values sampled from a normal distribution with mean 0 and variance 1\n",
+ "* `torch.arange`: Creates a tensor containing the values $N,N+1,N+2,...,M-1$ (the end value is exclusive)\n",
+ "* `torch.Tensor` (input list): Creates a tensor from the list elements you provide"
+ ],
+ "id": "2b19df67"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:47.611131Z",
+ "iopub.status.busy": "2021-09-16T12:32:47.610668Z",
+ "iopub.status.idle": "2021-09-16T12:32:47.623382Z",
+ "shell.execute_reply": "2021-09-16T12:32:47.622915Z"
+ },
+ "papermill": {
+ "duration": 0.060116,
+ "end_time": "2021-09-16T12:32:47.623485",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.563369",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "45fe1b7e",
+ "outputId": "f9c44389-af89-485f-9136-1993b41f98b4"
+ },
+ "source": [
+ "# Create a tensor from a (nested) list\n",
+ "x = torch.Tensor([[1, 2], [3, 4]])\n",
+ "print(x)"
+ ],
+ "id": "45fe1b7e",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([[1., 2.],\n",
+ " [3., 4.]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:47.717600Z",
+ "iopub.status.busy": "2021-09-16T12:32:47.717128Z",
+ "iopub.status.idle": "2021-09-16T12:32:47.720139Z",
+ "shell.execute_reply": "2021-09-16T12:32:47.719670Z"
+ },
+ "papermill": {
+ "duration": 0.052738,
+ "end_time": "2021-09-16T12:32:47.720244",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.667506",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "76f8e1f5",
+ "outputId": "5e507b04-8239-4761-ecdc-3209b6d26279"
+ },
+ "source": [
+ "# Create a tensor with random values between 0 and 1 with the shape [2, 3, 4]\n",
+ "x = torch.rand(2, 3, 4)\n",
+ "print(x)"
+ ],
+ "id": "76f8e1f5",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([[[0.8823, 0.9150, 0.3829, 0.9593],\n",
+ " [0.3904, 0.6009, 0.2566, 0.7936],\n",
+ " [0.9408, 0.1332, 0.9346, 0.5936]],\n",
+ "\n",
+ " [[0.8694, 0.5677, 0.7411, 0.4294],\n",
+ " [0.8854, 0.5739, 0.2666, 0.6274],\n",
+ " [0.2696, 0.4414, 0.2969, 0.8317]]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.044337,
+ "end_time": "2021-09-16T12:32:47.809374",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.765037",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "f2c84d7c"
+ },
+ "source": [
+ "You can obtain the shape of a tensor in the same way as in numpy (`x.shape`), or using the `.size` method:"
+ ],
+ "id": "f2c84d7c"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:47.902716Z",
+ "iopub.status.busy": "2021-09-16T12:32:47.900874Z",
+ "iopub.status.idle": "2021-09-16T12:32:47.906006Z",
+ "shell.execute_reply": "2021-09-16T12:32:47.905588Z"
+ },
+ "papermill": {
+ "duration": 0.05197,
+ "end_time": "2021-09-16T12:32:47.906110",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.854140",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "b9738fb0",
+ "outputId": "eccc9bc4-660a-40bd-d539-1757d4caef32"
+ },
+ "source": [
+ "shape = x.shape\n",
+ "print(\"Shape:\", x.shape)\n",
+ "\n",
+ "size = x.size()\n",
+ "print(\"Size:\", size)\n",
+ "\n",
+ "dim1, dim2, dim3 = x.size()\n",
+ "print(\"Size:\", dim1, dim2, dim3)"
+ ],
+ "id": "b9738fb0",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Shape: torch.Size([2, 3, 4])\n",
+ "Size: torch.Size([2, 3, 4])\n",
+ "Size: 2 3 4\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.045873,
+ "end_time": "2021-09-16T12:32:47.996974",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:47.951101",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "0e9401d0"
+ },
+ "source": [
+ "#### Tensor to Numpy, and Numpy to Tensor\n",
+ "\n",
+ "Tensors can be converted to numpy arrays, and numpy arrays back to tensors.\n",
+ "To transform a numpy array into a tensor, we can use the function `torch.from_numpy`:"
+ ],
+ "id": "0e9401d0"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:48.091606Z",
+ "iopub.status.busy": "2021-09-16T12:32:48.091139Z",
+ "iopub.status.idle": "2021-09-16T12:32:48.093695Z",
+ "shell.execute_reply": "2021-09-16T12:32:48.094094Z"
+ },
+ "papermill": {
+ "duration": 0.052501,
+ "end_time": "2021-09-16T12:32:48.094216",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:48.041715",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "e0670ce6",
+ "outputId": "02c97d56-0c83-460a-83bd-9f992491095d"
+ },
+ "source": [
+ "np_arr = np.array([[1, 2], [3, 4]])\n",
+ "tensor = torch.from_numpy(np_arr)\n",
+ "\n",
+ "print(\"Numpy array:\", np_arr)\n",
+ "print(\"PyTorch tensor:\", tensor)"
+ ],
+ "id": "e0670ce6",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Numpy array: [[1 2]\n",
+ " [3 4]]\n",
+ "PyTorch tensor: tensor([[1, 2],\n",
+ " [3, 4]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.045581,
+ "end_time": "2021-09-16T12:32:49.246779",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.201198",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "98f64f88"
+ },
+ "source": [
+ "To transform a PyTorch tensor back to a numpy array, we can use the function `.numpy()` on tensors:"
+ ],
+ "id": "98f64f88"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:49.340766Z",
+ "iopub.status.busy": "2021-09-16T12:32:49.340292Z",
+ "iopub.status.idle": "2021-09-16T12:32:49.343538Z",
+ "shell.execute_reply": "2021-09-16T12:32:49.343139Z"
+ },
+ "papermill": {
+ "duration": 0.05169,
+ "end_time": "2021-09-16T12:32:49.343640",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.291950",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "fb2c4b46",
+ "outputId": "f62d0660-5b03-4785-d05e-97500270a22d"
+ },
+ "source": [
+ "tensor = torch.arange(4)\n",
+ "np_arr = tensor.numpy()\n",
+ "\n",
+ "print(\"PyTorch tensor:\", tensor)\n",
+ "print(\"Numpy array:\", np_arr)"
+ ],
+ "id": "fb2c4b46",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "PyTorch tensor: tensor([0, 1, 2, 3])\n",
+ "Numpy array: [0 1 2 3]\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.051428,
+ "end_time": "2021-09-16T12:32:49.440442",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.389014",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "d31dc267"
+ },
+ "source": [
+ "The conversion of tensors to numpy require the tensor to be on the CPU, and not the GPU (more on GPU support in a later section).\n",
+ "In case you have a tensor on GPU, you need to call `.cpu()` on the tensor beforehand.\n",
+ "Hence, you get a line like `np_arr = tensor.cpu().numpy()`."
+ ],
+ "id": "d31dc267"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.045401,
+ "end_time": "2021-09-16T12:32:49.530975",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.485574",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "46a6ef01"
+ },
+ "source": [
+ "#### Operations\n",
+ "\n",
+ "Most operations that exist in numpy, also exist in PyTorch.\n",
+ "A full list of operations can be found in the [PyTorch documentation](https://pytorch.org/docs/stable/tensors.html#), but we will review the most important ones here.\n",
+ "\n",
+ "The simplest operation is to add two tensors:"
+ ],
+ "id": "46a6ef01"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:49.625900Z",
+ "iopub.status.busy": "2021-09-16T12:32:49.625408Z",
+ "iopub.status.idle": "2021-09-16T12:32:49.629396Z",
+ "shell.execute_reply": "2021-09-16T12:32:49.629792Z"
+ },
+ "papermill": {
+ "duration": 0.053783,
+ "end_time": "2021-09-16T12:32:49.629915",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.576132",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "13f957c9",
+ "outputId": "503387cb-4c45-4a9b-ea9e-212268a017ff"
+ },
+ "source": [
+ "x1 = torch.rand(2, 3)\n",
+ "x2 = torch.rand(2, 3)\n",
+ "y = x1 + x2\n",
+ "\n",
+ "print(\"X1\", x1)\n",
+ "print(\"X2\", x2)\n",
+ "print(\"Y\", y)"
+ ],
+ "id": "13f957c9",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X1 tensor([[0.1053, 0.2695, 0.3588],\n",
+ " [0.1994, 0.5472, 0.0062]])\n",
+ "X2 tensor([[0.9516, 0.0753, 0.8860],\n",
+ " [0.5832, 0.3376, 0.8090]])\n",
+ "Y tensor([[1.0569, 0.3448, 1.2448],\n",
+ " [0.7826, 0.8848, 0.8151]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.047584,
+ "end_time": "2021-09-16T12:32:49.724517",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.676933",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "4fbd2538"
+ },
+ "source": [
+ "Calling `x1 + x2` creates a new tensor containing the sum of the two inputs.\n",
+ "However, we can also use in-place operations that are applied directly on the memory of a tensor.\n",
+ "We thereby change the values of `x2`, losing the chance to re-access the values that `x2` held before the operation.\n",
+ "An example is shown below:"
+ ],
+ "id": "4fbd2538"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:49.823070Z",
+ "iopub.status.busy": "2021-09-16T12:32:49.822399Z",
+ "iopub.status.idle": "2021-09-16T12:32:49.828109Z",
+ "shell.execute_reply": "2021-09-16T12:32:49.827637Z"
+ },
+ "papermill": {
+ "duration": 0.055272,
+ "end_time": "2021-09-16T12:32:49.828214",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.772942",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "0e6a7497",
+ "outputId": "d0aab7ca-4ac1-42c9-94d7-214c4d52d30a"
+ },
+ "source": [
+ "x1 = torch.rand(2, 3)\n",
+ "x2 = torch.rand(2, 3)\n",
+ "print(\"X1 (before)\", x1)\n",
+ "print(\"X2 (before)\", x2)\n",
+ "\n",
+ "x2.add_(x1)\n",
+ "print(\"X1 (after)\", x1)\n",
+ "print(\"X2 (after)\", x2)"
+ ],
+ "id": "0e6a7497",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X1 (before) tensor([[0.5779, 0.9040, 0.5547],\n",
+ " [0.3423, 0.6343, 0.3644]])\n",
+ "X2 (before) tensor([[0.7104, 0.9464, 0.7890],\n",
+ " [0.2814, 0.7886, 0.5895]])\n",
+ "X1 (after) tensor([[0.5779, 0.9040, 0.5547],\n",
+ " [0.3423, 0.6343, 0.3644]])\n",
+ "X2 (after) tensor([[1.2884, 1.8504, 1.3437],\n",
+ " [0.6237, 1.4230, 0.9539]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.046964,
+ "end_time": "2021-09-16T12:32:49.921617",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.874653",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "5cbaa92f"
+ },
+ "source": [
+ "In-place operations are usually marked with an underscore postfix (e.g. \"add_\" instead of \"add\").\n",
+ "\n",
+ "Another common operation aims at changing the shape of a tensor.\n",
+ "A tensor of size (2,3) can be re-organized to any other shape with the same number of elements (e.g. a tensor of size (6), or (3,2), ...).\n",
+ "In PyTorch, this operation is called `view`:"
+ ],
+ "id": "5cbaa92f"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.026878Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.026353Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.029094Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.028625Z"
+ },
+ "papermill": {
+ "duration": 0.06096,
+ "end_time": "2021-09-16T12:32:50.029199",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:49.968239",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "6907851d",
+ "outputId": "06cd570d-7f0b-478f-c72a-6cf1eb1f4bc2"
+ },
+ "source": [
+ "x = torch.arange(6)\n",
+ "print(\"X\", x)"
+ ],
+ "id": "6907851d",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X tensor([0, 1, 2, 3, 4, 5])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.136129Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.135661Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.138426Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.137963Z"
+ },
+ "papermill": {
+ "duration": 0.054742,
+ "end_time": "2021-09-16T12:32:50.138528",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.083786",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "252fa33f",
+ "outputId": "223dccac-9220-47cb-88f2-0f5fd135618a"
+ },
+ "source": [
+ "x = x.view(2, 3)\n",
+ "print(\"X\", x)"
+ ],
+ "id": "252fa33f",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X tensor([[0, 1, 2],\n",
+ " [3, 4, 5]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.239003Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.238537Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.241342Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.240878Z"
+ },
+ "papermill": {
+ "duration": 0.053431,
+ "end_time": "2021-09-16T12:32:50.241439",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.188008",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "72e32ecb",
+ "outputId": "e9acaf48-2821-4d8e-b89b-2ae76d7f0d2e"
+ },
+ "source": [
+ "x = x.permute(1, 0) # Swapping dimension 0 and 1\n",
+ "print(\"X\", x)"
+ ],
+ "id": "72e32ecb",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X tensor([[0, 3],\n",
+ " [1, 4],\n",
+ " [2, 5]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.04706,
+ "end_time": "2021-09-16T12:32:50.335409",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.288349",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "cde67e9c"
+ },
+ "source": [
+ "Other commonly used operations include matrix multiplications, which are essential for neural networks.\n",
+ "Quite often, we have an input vector $\\mathbf{x}$, which is transformed using a learned weight matrix $\\mathbf{W}$.\n",
+ "There are multiple ways and functions to perform matrix multiplication, some of which we list below:\n",
+ "\n",
+ "* `torch.matmul`: Performs the matrix product over two tensors, where the specific behavior depends on the dimensions.\n",
+ "If both inputs are matrices (2-dimensional tensors), it performs the standard matrix product.\n",
+ "For higher dimensional inputs, the function supports broadcasting (for details see the [documentation](https://pytorch.org/docs/stable/generated/torch.matmul.html?highlight=matmul#torch.matmul)).\n",
+ "Can also be written as `a @ b`, similar to numpy.\n",
+ "* `torch.mm`: Performs the matrix product over two matrices, but doesn't support broadcasting (see [documentation](https://pytorch.org/docs/stable/generated/torch.mm.html?highlight=torch%20mm#torch.mm))\n",
+ "* `torch.bmm`: Performs the matrix product with an additional batch dimension.\n",
+ "If the first tensor $T$ is of shape ($b\\times n\\times m$), and the second tensor $R$ ($b\\times m\\times p$), the output $O$ is of shape ($b\\times n\\times p$), and has been calculated by performing $b$ matrix multiplications of the submatrices of $T$ and $R$: $O_i = T_i @ R_i$\n",
+ "* `torch.einsum`: Performs matrix multiplications and more (i.e. sums of products) using the Einstein summation convention.\n",
+ "Explanation of the Einstein sum can be found in assignment 1.\n",
+ "\n",
+ "Usually, we use `torch.matmul` or `torch.bmm`. We can try a matrix multiplication with `torch.matmul` below."
+ ],
+ "id": "cde67e9c"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.432080Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.431615Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.434705Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.434244Z"
+ },
+ "papermill": {
+ "duration": 0.052861,
+ "end_time": "2021-09-16T12:32:50.434804",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.381943",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "ff386c27",
+ "outputId": "5fc6dccf-55bd-4ed8-b06d-17f0567da5ac"
+ },
+ "source": [
+ "x = torch.arange(6)\n",
+ "x = x.view(2, 3)\n",
+ "print(\"X\", x)"
+ ],
+ "id": "ff386c27",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X tensor([[0, 1, 2],\n",
+ " [3, 4, 5]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.533427Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.532966Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.535803Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.535338Z"
+ },
+ "papermill": {
+ "duration": 0.054221,
+ "end_time": "2021-09-16T12:32:50.535901",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.481680",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "8c1795af",
+ "outputId": "0e937cff-487b-467b-890e-8b91cccf4913"
+ },
+ "source": [
+ "W = torch.arange(9).view(3, 3) # We can also stack multiple operations in a single line\n",
+ "print(\"W\", W)"
+ ],
+ "id": "8c1795af",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "W tensor([[0, 1, 2],\n",
+ " [3, 4, 5],\n",
+ " [6, 7, 8]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.633621Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.633158Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.635999Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.635506Z"
+ },
+ "papermill": {
+ "duration": 0.052906,
+ "end_time": "2021-09-16T12:32:50.636097",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.583191",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "4dddd17e",
+ "outputId": "e0123356-0b94-487b-b600-d02f396d5075"
+ },
+ "source": [
+ "h = torch.matmul(x, W) # Verify the result by calculating it by hand too!\n",
+ "print(\"h\", h)"
+ ],
+ "id": "4dddd17e",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "h tensor([[15, 18, 21],\n",
+ " [42, 54, 66]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.048142,
+ "end_time": "2021-09-16T12:32:50.732093",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.683951",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "72d026f9"
+ },
+ "source": [
+ "#### Indexing\n",
+ "\n",
+ "We often have the situation where we need to select a part of a tensor.\n",
+ "Indexing works just like in numpy, so let's try it:"
+ ],
+ "id": "72d026f9"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.831818Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.831358Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.834223Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.833827Z"
+ },
+ "papermill": {
+ "duration": 0.054078,
+ "end_time": "2021-09-16T12:32:50.834321",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.780243",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "b44382eb",
+ "outputId": "aed72501-e9e9-4996-dab0-66f2211f390c"
+ },
+ "source": [
+ "x = torch.arange(12).view(3, 4)\n",
+ "print(\"X\", x)"
+ ],
+ "id": "b44382eb",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X tensor([[ 0, 1, 2, 3],\n",
+ " [ 4, 5, 6, 7],\n",
+ " [ 8, 9, 10, 11]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:50.933203Z",
+ "iopub.status.busy": "2021-09-16T12:32:50.932738Z",
+ "iopub.status.idle": "2021-09-16T12:32:50.935142Z",
+ "shell.execute_reply": "2021-09-16T12:32:50.934747Z"
+ },
+ "papermill": {
+ "duration": 0.05308,
+ "end_time": "2021-09-16T12:32:50.935240",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.882160",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "e797f3da",
+ "outputId": "c53f9120-9e75-4b30-907a-206679e82791"
+ },
+ "source": [
+ "print(x[:, 1]) # Second column"
+ ],
+ "id": "e797f3da",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([1, 5, 9])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:51.035597Z",
+ "iopub.status.busy": "2021-09-16T12:32:51.035133Z",
+ "iopub.status.idle": "2021-09-16T12:32:51.037860Z",
+ "shell.execute_reply": "2021-09-16T12:32:51.037378Z"
+ },
+ "papermill": {
+ "duration": 0.053815,
+ "end_time": "2021-09-16T12:32:51.037961",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:50.984146",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "832fa534",
+ "outputId": "578ac154-c19e-4acd-9e5d-8a2d2dd5b8a2"
+ },
+ "source": [
+ "print(x[0]) # First row"
+ ],
+ "id": "832fa534",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([0, 1, 2, 3])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:51.138719Z",
+ "iopub.status.busy": "2021-09-16T12:32:51.138254Z",
+ "iopub.status.idle": "2021-09-16T12:32:51.140664Z",
+ "shell.execute_reply": "2021-09-16T12:32:51.140201Z"
+ },
+ "papermill": {
+ "duration": 0.053829,
+ "end_time": "2021-09-16T12:32:51.140762",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.086933",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "554196e9",
+ "outputId": "10730a61-9499-4cb4-966b-7beb994a547d"
+ },
+ "source": [
+ "print(x[:2, -1]) # First two rows, last column"
+ ],
+ "id": "554196e9",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([3, 7])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:51.242836Z",
+ "iopub.status.busy": "2021-09-16T12:32:51.242376Z",
+ "iopub.status.idle": "2021-09-16T12:32:51.245113Z",
+ "shell.execute_reply": "2021-09-16T12:32:51.244657Z"
+ },
+ "papermill": {
+ "duration": 0.054275,
+ "end_time": "2021-09-16T12:32:51.245210",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.190935",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "2efaee3a",
+ "outputId": "4589b195-ef4d-4fb1-a51a-64c374e68484"
+ },
+ "source": [
+ "print(x[1:3, :]) # Middle two rows"
+ ],
+ "id": "2efaee3a",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([[ 4, 5, 6, 7],\n",
+ " [ 8, 9, 10, 11]])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.049087,
+ "end_time": "2021-09-16T12:32:51.343540",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.294453",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "e1a9591f"
+ },
+ "source": [
+ "### Dynamic Computation Graph and Backpropagation\n",
+ "\n",
+ "One of the main reasons for using PyTorch in Deep Learning projects is that we can automatically get **gradients/derivatives** of functions that we define.\n",
+ "We will mainly use PyTorch for implementing neural networks, and they are just fancy functions.\n",
+ "If we use weight matrices in our function that we want to learn, then those are called the **parameters** or simply the **weights**.\n",
+ "\n",
+ "If our neural network would output a single scalar value, we would talk about taking the **derivative**, but you will see that quite often we will have **multiple** output variables (\"values\"); in that case we talk about **gradients**.\n",
+ "It's a more general term.\n",
+ "\n",
+ "Given an input $\\mathbf{x}$, we define our function by **manipulating** that input, usually by matrix-multiplications with weight matrices and additions with so-called bias vectors.\n",
+ "As we manipulate our input, we are automatically creating a **computational graph**.\n",
+ "This graph shows how to arrive at our output from our input.\n",
+ "PyTorch is a **define-by-run** framework; this means that we can just do our manipulations, and PyTorch will keep track of that graph for us.\n",
+ "Thus, we create a dynamic computation graph along the way.\n",
+ "\n",
+ "So, to recap: the only thing we have to do is to compute the **output**, and then we can ask PyTorch to automatically get the **gradients**.\n",
+ "\n",
+ "> **Note: Why do we want gradients?**\n",
+ "Consider that we have defined a function, a neural net, that is supposed to compute a certain output $y$ for an input vector $\\mathbf{x}$.\n",
+ "We then define an **error measure** that tells us how wrong our network is; how bad it is in predicting output $y$ from input $\\mathbf{x}$.\n",
+ "Based on this error measure, we can use the gradients to **update** the weights $\\mathbf{W}$ that were responsible for the output, so that the next time we present input $\\mathbf{x}$ to our network, the output will be closer to what we want.\n",
+ "\n",
+ "The first thing we have to do is to specify which tensors require gradients.\n",
+ "By default, when we create a tensor, it does not require gradients."
+ ],
+ "id": "e1a9591f"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:51.444570Z",
+ "iopub.status.busy": "2021-09-16T12:32:51.443320Z",
+ "iopub.status.idle": "2021-09-16T12:32:51.447126Z",
+ "shell.execute_reply": "2021-09-16T12:32:51.446665Z"
+ },
+ "papermill": {
+ "duration": 0.054607,
+ "end_time": "2021-09-16T12:32:51.447227",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.392620",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "9f94399d",
+ "outputId": "9a315d48-5385-4e97-953d-574848a420e4"
+ },
+ "source": [
+ "x = torch.ones((3,))\n",
+ "print(x.requires_grad)"
+ ],
+ "id": "9f94399d",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "False\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.049292,
+ "end_time": "2021-09-16T12:32:51.546032",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.496740",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "12697ab1"
+ },
+ "source": [
+ "We can change this for an existing tensor using the function `requires_grad_()` (underscore indicating that this is an in-place operation).\n",
+ "Alternatively, when creating a tensor, you can pass the argument\n",
+ "`requires_grad=True` to most initializers we have seen above."
+ ],
+ "id": "12697ab1"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:51.647892Z",
+ "iopub.status.busy": "2021-09-16T12:32:51.647430Z",
+ "iopub.status.idle": "2021-09-16T12:32:51.649913Z",
+ "shell.execute_reply": "2021-09-16T12:32:51.649498Z"
+ },
+ "papermill": {
+ "duration": 0.05454,
+ "end_time": "2021-09-16T12:32:51.650014",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.595474",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "7d565264",
+ "outputId": "d1359e2c-57c8-4e76-9632-dedeb388e9cc"
+ },
+ "source": [
+ "x.requires_grad_(True)\n",
+ "print(x.requires_grad)"
+ ],
+ "id": "7d565264",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "True\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.050272,
+ "end_time": "2021-09-16T12:32:51.750030",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.699758",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "a16f495e"
+ },
+ "source": [
+ "In order to get familiar with the concept of a computation graph, we will create one for the following function:\n",
+ "\n",
+ "$$y = \\frac{1}{|x|}\\sum_i \\left[(x_i + 2)^2 + 3\\right]$$\n",
+ "\n",
+ "You could imagine that $x$ are our parameters, and we want to optimize (either maximize or minimize) the output $y$.\n",
+ "For this, we want to obtain the gradients $\\partial y / \\partial \\mathbf{x}$.\n",
+ "For our example, we'll use $\\mathbf{x}=[0,1,2]$ as our input."
+ ],
+ "id": "a16f495e"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:51.853091Z",
+ "iopub.status.busy": "2021-09-16T12:32:51.852629Z",
+ "iopub.status.idle": "2021-09-16T12:32:51.855635Z",
+ "shell.execute_reply": "2021-09-16T12:32:51.855175Z"
+ },
+ "papermill": {
+ "duration": 0.055874,
+ "end_time": "2021-09-16T12:32:51.855735",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.799861",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "abd9c738",
+ "outputId": "e7e68aea-7afb-4d35-9e83-eb98cbc0c1cc"
+ },
+ "source": [
+ "x = torch.arange(3, dtype=torch.float32, requires_grad=True) # Only float tensors can have gradients\n",
+ "print(\"X\", x)"
+ ],
+ "id": "abd9c738",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X tensor([0., 1., 2.], requires_grad=True)\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.05566,
+ "end_time": "2021-09-16T12:32:51.961765",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:51.906105",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "548b420c"
+ },
+ "source": [
+ "Now let's build the computation graph step by step.\n",
+ "You can combine multiple operations in a single line, but we will\n",
+ "separate them here to get a better understanding of how each operation\n",
+ "is added to the computation graph."
+ ],
+ "id": "548b420c"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:52.072120Z",
+ "iopub.status.busy": "2021-09-16T12:32:52.071647Z",
+ "iopub.status.idle": "2021-09-16T12:32:52.074610Z",
+ "shell.execute_reply": "2021-09-16T12:32:52.074989Z"
+ },
+ "papermill": {
+ "duration": 0.056246,
+ "end_time": "2021-09-16T12:32:52.075114",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.018868",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "50d91bf7",
+ "outputId": "9851aea1-fb05-482b-b55f-1ab70bc0bb57"
+ },
+ "source": [
+ "a = x + 2\n",
+ "b = a ** 2\n",
+ "c = b + 3\n",
+ "y = c.mean()\n",
+ "print(\"Y\", y)"
+ ],
+ "id": "50d91bf7",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Y tensor(12.6667, grad_fn=<MeanBackward0>)\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.049612,
+ "end_time": "2021-09-16T12:32:52.175001",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.125389",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "e0d2f0ae"
+ },
+ "source": [
+ "Using the statements above, we have created a computation graph that looks similar to the figure below:\n",
+ "\n",
+ "<center style=\"width: 100%\"><img src=\"pytorch_computation_graph.svg\" width=\"200px\"></center>\n",
+ "\n",
+ "\n",
+ "We calculate $a$ based on the inputs $x$ and the constant $2$, $b$ is $a$ squared, and so on.\n",
+ "The visualization is an abstraction of the dependencies between inputs and outputs of the operations we have applied.\n",
+ "Each node of the computation graph has automatically defined a function for calculating the gradients with respect to its inputs, `grad_fn`.\n",
+ "You can see this when we printed the output tensor $y$.\n",
+ "This is why the computation graph is usually visualized in the reverse direction (arrows point from the result to the inputs).\n",
+ "We can perform backpropagation on the computation graph by calling the\n",
+ "function `backward()` on the last output, which effectively calculates\n",
+ "the gradients for each tensor that has the property\n",
+ "`requires_grad=True`:"
+ ],
+ "id": "e0d2f0ae"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:52.278438Z",
+ "iopub.status.busy": "2021-09-16T12:32:52.277977Z",
+ "iopub.status.idle": "2021-09-16T12:32:52.363356Z",
+ "shell.execute_reply": "2021-09-16T12:32:52.362899Z"
+ },
+ "papermill": {
+ "duration": 0.137892,
+ "end_time": "2021-09-16T12:32:52.363476",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.225584",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "d7c2de18"
+ },
+ "source": [
+ "y.backward()"
+ ],
+ "id": "d7c2de18",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.050494,
+ "end_time": "2021-09-16T12:32:52.465208",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.414714",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "44d67068"
+ },
+ "source": [
+ "`x.grad` will now contain the gradient $\\partial y/ \\partial \\mathbf{x}$, and this gradient indicates how a change in $\\mathbf{x}$ will affect output $y$ given the current input $\\mathbf{x}=[0,1,2]$:"
+ ],
+ "id": "44d67068"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:52.569520Z",
+ "iopub.status.busy": "2021-09-16T12:32:52.569055Z",
+ "iopub.status.idle": "2021-09-16T12:32:52.572034Z",
+ "shell.execute_reply": "2021-09-16T12:32:52.571551Z"
+ },
+ "papermill": {
+ "duration": 0.056348,
+ "end_time": "2021-09-16T12:32:52.572135",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.515787",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "58d14f6c",
+ "outputId": "97e37bad-a05c-4028-d7b5-99cfc931f671"
+ },
+ "source": [
+ "print(x.grad)"
+ ],
+ "id": "58d14f6c",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "tensor([1.3333, 2.0000, 2.6667])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.050295,
+ "end_time": "2021-09-16T12:32:52.673692",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.623397",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "a180ccfc"
+ },
+ "source": [
+ "We can also verify these gradients by hand.\n",
+ "We will calculate the gradients using the chain rule, in the same way as PyTorch did it:\n",
+ "\n",
+ "$$\\frac{\\partial y}{\\partial x_i} = \\frac{\\partial y}{\\partial c_i}\\frac{\\partial c_i}{\\partial b_i}\\frac{\\partial b_i}{\\partial a_i}\\frac{\\partial a_i}{\\partial x_i}$$\n",
+ "\n",
+ "Note that we have simplified this equation to index notation, and by using the fact that all operations besides the mean do not combine the elements in the tensor.\n",
+ "The partial derivatives are:\n",
+ "\n",
+ "$$\n",
+ "\\frac{\\partial a_i}{\\partial x_i} = 1,\\hspace{1cm}\n",
+ "\\frac{\\partial b_i}{\\partial a_i} = 2\\cdot a_i\\hspace{1cm}\n",
+ "\\frac{\\partial c_i}{\\partial b_i} = 1\\hspace{1cm}\n",
+ "\\frac{\\partial y}{\\partial c_i} = \\frac{1}{3}\n",
+ "$$\n",
+ "\n",
+ "Hence, with the input being $\\mathbf{x}=[0,1,2]$, our gradients are $\\partial y/\\partial \\mathbf{x}=[4/3,2,8/3]$.\n",
+ "The previous code cell should have printed the same result."
+ ],
+ "id": "a180ccfc"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.051077,
+ "end_time": "2021-09-16T12:32:52.777753",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.726676",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "804f38e2"
+ },
+ "source": [
+ "### GPU support\n",
+ "\n",
+ "A crucial feature of PyTorch is the support of GPUs, short for Graphics Processing Unit.\n",
+ "A GPU can perform many thousands of small operations in parallel, making it very well suitable for performing large matrix operations in neural networks.\n",
+ "When comparing GPUs to CPUs, we can list the following main differences (credit: [Kevin Krewell, 2009](https://blogs.nvidia.com/blog/2009/12/16/whats-the-difference-between-a-cpu-and-a-gpu/))\n",
+ "\n",
+ "<center style=\"width: 100%\"><img src=\"comparison_CPU_GPU.png\" width=\"700px\"></center>\n",
+ "\n",
+ "\n",
+ "CPUs and GPUs have both different advantages and disadvantages, which is why many computers contain both components and use them for different tasks.\n",
+ "In case you are not familiar with GPUs, you can read up more details in this [NVIDIA blog post](https://blogs.nvidia.com/blog/2009/12/16/whats-the-difference-between-a-cpu-and-a-gpu/) or [here](https://www.intel.com/content/www/us/en/products/docs/processors/what-is-a-gpu.html).\n",
+ "\n",
+ "GPUs can accelerate the training of your network up to a factor of $100$ which is essential for large neural networks.\n",
+ "PyTorch implements a lot of functionality for supporting GPUs (mostly those of NVIDIA due to the libraries [CUDA](https://developer.nvidia.com/cuda-zone) and [cuDNN](https://developer.nvidia.com/cudnn)).\n",
+ "First, let's check whether you have a GPU available:"
+ ],
+ "id": "804f38e2"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:52.886128Z",
+ "iopub.status.busy": "2021-09-16T12:32:52.885628Z",
+ "iopub.status.idle": "2021-09-16T12:32:52.888249Z",
+ "shell.execute_reply": "2021-09-16T12:32:52.887851Z"
+ },
+ "papermill": {
+ "duration": 0.059327,
+ "end_time": "2021-09-16T12:32:52.888348",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.829021",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "be576156",
+ "outputId": "35e4b3dd-c780-4b96-81e7-d3b590c6322d"
+ },
+ "source": [
+ "gpu_avail = torch.cuda.is_available()\n",
+ "print(f\"Is the GPU available? {gpu_avail}\")"
+ ],
+ "id": "be576156",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Is the GPU available? True\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.051392,
+ "end_time": "2021-09-16T12:32:52.990937",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:52.939545",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "44ba6d0f"
+ },
+ "source": [
+ "If you have a GPU on your computer but the command above returns False, make sure you have the correct CUDA-version installed.\n",
+ "The `dl2020` environment comes with the CUDA-toolkit 10.1, which is selected for the Lisa supercomputer.\n",
+ "Please change it if necessary (CUDA 10.2 is currently common).\n",
+ "On Google Colab, make sure that you have selected a GPU in your runtime setup (in the menu, check under `Runtime -> Change runtime type`).\n",
+ "\n",
+ "By default, all tensors you create are stored on the CPU.\n",
+ "We can push a tensor to the GPU by using the function `.to(...)`, or `.cuda()`.\n",
+ "However, it is often a good practice to define a `device` object in your code which points to the GPU if you have one, and otherwise to the CPU.\n",
+ "Then, you can write your code with respect to this device object, and it allows you to run the same code on both a CPU-only system, and one with a GPU.\n",
+ "Let's try it below.\n",
+ "We can specify the device as follows:"
+ ],
+ "id": "44ba6d0f"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:53.096938Z",
+ "iopub.status.busy": "2021-09-16T12:32:53.096464Z",
+ "iopub.status.idle": "2021-09-16T12:32:53.099120Z",
+ "shell.execute_reply": "2021-09-16T12:32:53.098658Z"
+ },
+ "papermill": {
+ "duration": 0.057283,
+ "end_time": "2021-09-16T12:32:53.099221",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:53.041938",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "c1821da3",
+ "outputId": "bc003b3f-93d1-486a-a580-dd094dd0df95"
+ },
+ "source": [
+ "device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n",
+ "print(\"Device\", device)"
+ ],
+ "id": "c1821da3",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Device cuda\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.052772,
+ "end_time": "2021-09-16T12:32:53.204148",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:53.151376",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "c7b99a5d"
+ },
+ "source": [
+ "Now let's create a tensor and push it to the device:"
+ ],
+ "id": "c7b99a5d"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:53.312496Z",
+ "iopub.status.busy": "2021-09-16T12:32:53.312034Z",
+ "iopub.status.idle": "2021-09-16T12:32:55.885460Z",
+ "shell.execute_reply": "2021-09-16T12:32:55.884980Z"
+ },
+ "papermill": {
+ "duration": 2.629406,
+ "end_time": "2021-09-16T12:32:55.885574",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:53.256168",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "be1ce082",
+ "outputId": "dd451975-df98-4f52-8803-5310cca38bf6"
+ },
+ "source": [
+ "x = torch.zeros(2, 3)\n",
+ "x = x.to(device)\n",
+ "print(\"X\", x)"
+ ],
+ "id": "be1ce082",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "X tensor([[0., 0., 0.],\n",
+ " [0., 0., 0.]], device='cuda:0')\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.052118,
+ "end_time": "2021-09-16T12:32:55.989872",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:55.937754",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "e1bb4237"
+ },
+ "source": [
+ "In case you have a GPU, you should now see the attribute `device='cuda:0'` being printed next to your tensor.\n",
+ "The zero next to cuda indicates that this is the zero-th GPU device on your computer.\n",
+ "PyTorch also supports multi-GPU systems, but this you will only need once you have very big networks to train (if interested, see the [PyTorch documentation](https://pytorch.org/docs/stable/distributed.html#distributed-basics)).\n",
+ "We can also compare the runtime of a large matrix multiplication on the CPU with an operation on the GPU:"
+ ],
+ "id": "e1bb4237"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:56.099380Z",
+ "iopub.status.busy": "2021-09-16T12:32:56.098905Z",
+ "iopub.status.idle": "2021-09-16T12:32:56.633065Z",
+ "shell.execute_reply": "2021-09-16T12:32:56.632672Z"
+ },
+ "papermill": {
+ "duration": 0.59052,
+ "end_time": "2021-09-16T12:32:56.633183",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:56.042663",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "097f28c5",
+ "outputId": "afb6477a-8658-4a72-adcf-83cc58bda39e"
+ },
+ "source": [
+ "x = torch.randn(5000, 5000)\n",
+ "\n",
+ "# CPU version\n",
+ "start_time = time.time()\n",
+ "_ = torch.matmul(x, x)\n",
+ "end_time = time.time()\n",
+ "print(f\"CPU time: {(end_time - start_time):6.5f}s\")\n",
+ "\n",
+ "# GPU version\n",
+ "x = x.to(device)\n",
+ "# The first operation on a CUDA device can be slow as it has to establish a CPU-GPU communication first.\n",
+ "# Hence, we run an arbitrary command first without timing it for a fair comparison.\n",
+ "if torch.cuda.is_available():\n",
+ " _ = torch.matmul(x * 0.0, x)\n",
+ "start_time = time.time()\n",
+ "_ = torch.matmul(x, x)\n",
+ "end_time = time.time()\n",
+ "print(f\"GPU time: {(end_time - start_time):6.5f}s\")"
+ ],
+ "id": "097f28c5",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CPU time: 0.25468s\n",
+ "GPU time: 0.00011s\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.054603,
+ "end_time": "2021-09-16T12:32:56.740740",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:56.686137",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "e6502b2e"
+ },
+ "source": [
+ "Depending on the size of the operation and the CPU/GPU in your system, the speedup of this operation can be >500x.\n",
+ "As `matmul` operations are very common in neural networks, we can already see the great benefit of training a NN on a GPU.\n",
+ "The time estimate can be relatively noisy here because we haven't run it for multiple times.\n",
+ "Feel free to extend this, but it also takes longer to run.\n",
+ "\n",
+ "When generating random numbers, the seed between CPU and GPU is not synchronized.\n",
+ "Hence, we need to set the seed on the GPU separately to ensure a reproducible code.\n",
+ "Note that due to different GPU architectures, running the same code on different GPUs does not guarantee the same random numbers.\n",
+ "Still, we don't want that our code gives us a different output every time we run it on the exact same hardware.\n",
+ "Hence, we also set the seed on the GPU:"
+ ],
+ "id": "e6502b2e"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:56.849316Z",
+ "iopub.status.busy": "2021-09-16T12:32:56.848847Z",
+ "iopub.status.idle": "2021-09-16T12:32:56.850935Z",
+ "shell.execute_reply": "2021-09-16T12:32:56.850475Z"
+ },
+ "papermill": {
+ "duration": 0.057334,
+ "end_time": "2021-09-16T12:32:56.851032",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:56.793698",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "5b767a95"
+ },
+ "source": [
+ "# GPU operations have a separate seed we also want to set\n",
+ "if torch.cuda.is_available():\n",
+ " torch.cuda.manual_seed(42)\n",
+ " torch.cuda.manual_seed_all(42)\n",
+ "\n",
+ "# Additionally, some operations on a GPU are implemented stochastically for efficiency\n",
+ "# We want to ensure that all operations are deterministic on GPU (if used) for reproducibility\n",
+ "torch.backends.cudnn.deterministic = True\n",
+ "torch.backends.cudnn.benchmark = False"
+ ],
+ "id": "5b767a95",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.051866,
+ "end_time": "2021-09-16T12:32:56.955066",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:56.903200",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "f4ca3f5b"
+ },
+ "source": [
+ "## Learning by example: Continuous XOR\n",
+ "\n",
+ "\n",
+ "If we want to build a neural network in PyTorch, we could specify all our parameters (weight matrices, bias vectors) using `Tensors` (with `requires_grad=True`), ask PyTorch to calculate the gradients and then adjust the parameters.\n",
+ "But things can quickly get cumbersome if we have a lot of parameters.\n",
+ "In PyTorch, there is a package called `torch.nn` that makes building neural networks more convenient.\n",
+ "\n",
+ "We will introduce the libraries and all additional parts you might need to train a neural network in PyTorch, using a simple example classifier on a simple yet well known example: XOR.\n",
+ "Given two binary inputs $x_1$ and $x_2$, the label to predict is $1$ if either $x_1$ or $x_2$ is $1$ while the other is $0$, or the label is $0$ in all other cases.\n",
+ "The example became famous for the fact that a single neuron, i.e. a linear classifier, cannot learn this simple function.\n",
+ "Hence, we will learn how to build a small neural network that can learn this function.\n",
+ "To make it a little bit more interesting, we move the XOR into continuous space and introduce some Gaussian noise on the binary inputs.\n",
+ "Our desired separation of an XOR dataset could look as follows:\n",
+ "\n",
+ ""
+ ],
+ "id": "f4ca3f5b"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.051714,
+ "end_time": "2021-09-16T12:32:57.058731",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.007017",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "e23f8eac"
+ },
+ "source": [
+ "### The model\n",
+ "\n",
+ "The package `torch.nn` defines a series of useful classes like linear networks layers, activation functions, loss functions etc.\n",
+ "A full list can be found [here](https://pytorch.org/docs/stable/nn.html).\n",
+ "In case you need a certain network layer, check the documentation of the package first before writing the layer yourself as the package likely contains the code for it already.\n",
+ "We import it below:"
+ ],
+ "id": "e23f8eac"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "lines_to_next_cell": 0,
+ "papermill": {
+ "duration": 0.052216,
+ "end_time": "2021-09-16T12:32:57.162758",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.110542",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "8592c856"
+ },
+ "source": [
+ ""
+ ],
+ "id": "8592c856",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "papermill": {
+ "duration": 0.052415,
+ "end_time": "2021-09-16T12:32:57.268259",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.215844",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "bf8c706a"
+ },
+ "source": [
+ ""
+ ],
+ "id": "bf8c706a",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.051617,
+ "end_time": "2021-09-16T12:32:57.371727",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.320110",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "ba549835"
+ },
+ "source": [
+ "Additionally to `torch.nn`, there is also `torch.nn.functional`.\n",
+ "It contains functions that are used in network layers.\n",
+ "This is in contrast to `torch.nn` which defines them as `nn.Modules` (more on it below), and `torch.nn` actually uses a lot of functionalities from `torch.nn.functional`.\n",
+ "Hence, the functional package is useful in many situations, and so we import it as well here."
+ ],
+ "id": "ba549835"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "lines_to_next_cell": 2,
+ "papermill": {
+ "duration": 0.052024,
+ "end_time": "2021-09-16T12:32:57.475971",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.423947",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "acc7d527"
+ },
+ "source": [
+ "#### nn.Module\n",
+ "\n",
+ "In PyTorch, a neural network is built up out of modules.\n",
+ "Modules can contain other modules, and a neural network is considered to be a module itself as well.\n",
+ "The basic template of a module is as follows:"
+ ],
+ "id": "acc7d527"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:57.583596Z",
+ "iopub.status.busy": "2021-09-16T12:32:57.583131Z",
+ "iopub.status.idle": "2021-09-16T12:32:57.585190Z",
+ "shell.execute_reply": "2021-09-16T12:32:57.584806Z"
+ },
+ "lines_to_next_cell": 2,
+ "papermill": {
+ "duration": 0.057057,
+ "end_time": "2021-09-16T12:32:57.585292",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.528235",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "34d1a3d7"
+ },
+ "source": [
+ "class MyModule(nn.Module):\n",
+ " def __init__(self):\n",
+ " super().__init__()\n",
+ " # Some init for my module\n",
+ "\n",
+ " def forward(self, x):\n",
+ " # Function for performing the calculation of the module.\n",
+ " pass"
+ ],
+ "id": "34d1a3d7",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "lines_to_next_cell": 2,
+ "papermill": {
+ "duration": 0.051843,
+ "end_time": "2021-09-16T12:32:57.689235",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.637392",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "04b470dc"
+ },
+ "source": [
+ "The forward function is where the computation of the module takes place, and it is executed when you call the module (`net = MyModule(); net(x)`).\n",
+ "In the init function, we usually create the parameters of the module, using `nn.Parameter`, or defining other modules that are used in the forward function.\n",
+ "The backward calculation is done automatically, but could be overwritten as well if wanted.\n",
+ "\n",
+ "#### Simple classifier\n",
+ "We can now make use of the pre-defined modules in the `torch.nn` package, and define our own small neural network.\n",
+ "We will use a minimal network with an input layer, one hidden layer with tanh as activation function, and an output layer.\n",
+ "In other words, our networks should look something like this:\n",
+ "\n",
+ "\n",
+ "\n",
+ "The input neurons are shown in blue, which represent the coordinates $x_1$ and $x_2$ of a data point.\n",
+ "The hidden neurons including a tanh activation are shown in white, and the output neuron in red.\n",
+ "In PyTorch, we can define this as follows:"
+ ],
+ "id": "04b470dc"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:57.800277Z",
+ "iopub.status.busy": "2021-09-16T12:32:57.799807Z",
+ "iopub.status.idle": "2021-09-16T12:32:57.801874Z",
+ "shell.execute_reply": "2021-09-16T12:32:57.801393Z"
+ },
+ "papermill": {
+ "duration": 0.057783,
+ "end_time": "2021-09-16T12:32:57.801973",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.744190",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "2606872c"
+ },
+ "source": [
+ "class SimpleClassifier(nn.Module):\n",
+ " def __init__(self, num_inputs, num_hidden, num_outputs):\n",
+ " super().__init__()\n",
+ " # Initialize the modules we need to build the network\n",
+ " self.linear1 = nn.Linear(num_inputs, num_hidden)\n",
+ " self.act_fn = nn.Tanh()\n",
+ " self.linear2 = nn.Linear(num_hidden, num_outputs)\n",
+ "\n",
+ " def forward(self, x):\n",
+ " # Perform the calculation of the model to determine the prediction\n",
+ " x = self.linear1(x)\n",
+ " x = self.act_fn(x)\n",
+ " x = self.linear2(x)\n",
+ " return x"
+ ],
+ "id": "2606872c",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.051977,
+ "end_time": "2021-09-16T12:32:57.905865",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.853888",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "708b426d"
+ },
+ "source": [
+ "For the examples in this notebook, we will use a tiny neural network with two input neurons and four hidden neurons.\n",
+ "As we perform binary classification, we will use a single output neuron.\n",
+ "Note that we do not apply a sigmoid on the output yet.\n",
+ "This is because other functions, especially the loss, are more efficient and precise to calculate on the original outputs instead of the sigmoid output.\n",
+ "We will discuss the detailed reason later."
+ ],
+ "id": "708b426d"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:58.014018Z",
+ "iopub.status.busy": "2021-09-16T12:32:58.013529Z",
+ "iopub.status.idle": "2021-09-16T12:32:58.016608Z",
+ "shell.execute_reply": "2021-09-16T12:32:58.016147Z"
+ },
+ "papermill": {
+ "duration": 0.058322,
+ "end_time": "2021-09-16T12:32:58.016706",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:57.958384",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "f8c99074",
+ "outputId": "472ac145-a50c-42e7-f25f-c05cc72be78c"
+ },
+ "source": [
+ "model = SimpleClassifier(num_inputs=2, num_hidden=4, num_outputs=1)\n",
+ "# Printing a module shows all its submodules\n",
+ "print(model)"
+ ],
+ "id": "f8c99074",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "SimpleClassifier(\n",
+ " (linear1): Linear(in_features=2, out_features=4, bias=True)\n",
+ " (act_fn): Tanh()\n",
+ " (linear2): Linear(in_features=4, out_features=1, bias=True)\n",
+ ")\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.0523,
+ "end_time": "2021-09-16T12:32:58.121280",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.068980",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "7b432a95"
+ },
+ "source": [
+ "Printing the model lists all submodules it contains.\n",
+ "The parameters of a module can be obtained by using its `parameters()` functions, or `named_parameters()` to get a name to each parameter object.\n",
+ "For our small neural network, we have the following parameters:"
+ ],
+ "id": "7b432a95"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:58.230366Z",
+ "iopub.status.busy": "2021-09-16T12:32:58.229902Z",
+ "iopub.status.idle": "2021-09-16T12:32:58.232813Z",
+ "shell.execute_reply": "2021-09-16T12:32:58.232352Z"
+ },
+ "papermill": {
+ "duration": 0.059317,
+ "end_time": "2021-09-16T12:32:58.232913",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.173596",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "52c7230d",
+ "outputId": "2333aa10-d564-4873-a505-3f73dbf6f76e"
+ },
+ "source": [
+ "for name, param in model.named_parameters():\n",
+ " print(f\"Parameter {name}, shape {param.shape}\")"
+ ],
+ "id": "52c7230d",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Parameter linear1.weight, shape torch.Size([4, 2])\n",
+ "Parameter linear1.bias, shape torch.Size([4])\n",
+ "Parameter linear2.weight, shape torch.Size([1, 4])\n",
+ "Parameter linear2.bias, shape torch.Size([1])\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.053627,
+ "end_time": "2021-09-16T12:32:58.340801",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.287174",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "b2650ef9"
+ },
+ "source": [
+ "Each linear layer has a weight matrix of the shape `[output, input]`, and a bias of the shape `[output]`.\n",
+ "The tanh activation function does not have any parameters.\n",
+ "Note that parameters are only registered for `nn.Module` objects that are direct object attributes, i.e. `self.a = ...`.\n",
+ "If you define a list of modules, the parameters of those are not registered for the outer module and can cause some issues when you try to optimize your module.\n",
+ "There are alternatives, like `nn.ModuleList`, `nn.ModuleDict` and `nn.Sequential`, that allow you to have different data structures of modules.\n",
+ "We will use them in a few later tutorials and explain them there."
+ ],
+ "id": "b2650ef9"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.052923,
+ "end_time": "2021-09-16T12:32:58.446527",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.393604",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "463b7836"
+ },
+ "source": [
+ "### The data\n",
+ "\n",
+ "PyTorch also provides a few functionalities to load the training and\n",
+ "test data efficiently, summarized in the package `torch.utils.data`."
+ ],
+ "id": "463b7836"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "papermill": {
+ "duration": 0.052877,
+ "end_time": "2021-09-16T12:32:58.552525",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.499648",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "0ab84d11"
+ },
+ "source": [
+ ""
+ ],
+ "id": "0ab84d11",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.052744,
+ "end_time": "2021-09-16T12:32:58.658400",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.605656",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "21c14544"
+ },
+ "source": [
+ "The data package defines two classes which are the standard interface for handling data in PyTorch: `data.Dataset`, and `data.DataLoader`.\n",
+ "The dataset class provides a uniform interface to access the\n",
+ "training/test data, while the data loader makes sure to efficiently load\n",
+ "and stack the data points from the dataset into batches during training."
+ ],
+ "id": "21c14544"
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.055595,
+ "end_time": "2021-09-16T12:32:58.767233",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.711638",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "fb7ac0a3"
+ },
+ "source": [
+ "#### The dataset class\n",
+ "\n",
+ "The dataset class summarizes the basic functionality of a dataset in a natural way.\n",
+ "To define a dataset in PyTorch, we simply specify two functions: `__getitem__`, and `__len__`.\n",
+ "The get-item function has to return the $i$-th data point in the dataset, while the len function returns the size of the dataset.\n",
+ "For the XOR dataset, we can define the dataset class as follows:"
+ ],
+ "id": "fb7ac0a3"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:58.880519Z",
+ "iopub.status.busy": "2021-09-16T12:32:58.880040Z",
+ "iopub.status.idle": "2021-09-16T12:32:58.881657Z",
+ "shell.execute_reply": "2021-09-16T12:32:58.882056Z"
+ },
+ "papermill": {
+ "duration": 0.061328,
+ "end_time": "2021-09-16T12:32:58.882175",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.820847",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "85adf0a4"
+ },
+ "source": [
+ "\n",
+ "\n",
+ "class XORDataset(data.Dataset):\n",
+ " def __init__(self, size, std=0.1):\n",
+ " \"\"\"\n",
+ " Inputs:\n",
+ " size - Number of data points we want to generate\n",
+ " std - Standard deviation of the noise (see generate_continuous_xor function)\n",
+ " \"\"\"\n",
+ " super().__init__()\n",
+ " self.size = size\n",
+ " self.std = std\n",
+ " self.generate_continuous_xor()\n",
+ "\n",
+ " def generate_continuous_xor(self):\n",
+ " # Each data point in the XOR dataset has two variables, x and y, that can be either 0 or 1\n",
+ " # The label is their XOR combination, i.e. 1 if only x or only y is 1 while the other is 0.\n",
+ " # If x=y, the label is 0.\n",
+ " data = torch.randint(low=0, high=2, size=(self.size, 2), dtype=torch.float32)\n",
+ " label = (data.sum(dim=1) == 1).to(torch.long)\n",
+ " # To make it slightly more challenging, we add a bit of gaussian noise to the data points.\n",
+ " data += self.std * torch.randn(data.shape)\n",
+ "\n",
+ " self.data = data\n",
+ " self.label = label\n",
+ "\n",
+ " def __len__(self):\n",
+ "        # Number of data points we have. Alternatively self.data.shape[0], or self.label.shape[0]\n",
+ " return self.size\n",
+ "\n",
+ " def __getitem__(self, idx):\n",
+ " # Return the idx-th data point of the dataset\n",
+ " # If we have multiple things to return (data point and label), we can return them as tuple\n",
+ " data_point = self.data[idx]\n",
+ " data_label = self.label[idx]\n",
+ " return data_point, data_label"
+ ],
+ "id": "85adf0a4",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "papermill": {
+ "duration": 0.053132,
+ "end_time": "2021-09-16T12:32:58.988271",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:58.935139",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "82143473"
+ },
+ "source": [
+ "Let's try to create such a dataset and inspect it:"
+ ],
+ "id": "82143473"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:59.100298Z",
+ "iopub.status.busy": "2021-09-16T12:32:59.099829Z",
+ "iopub.status.idle": "2021-09-16T12:32:59.103186Z",
+ "shell.execute_reply": "2021-09-16T12:32:59.103565Z"
+ },
+ "papermill": {
+ "duration": 0.059959,
+ "end_time": "2021-09-16T12:32:59.103683",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:59.043724",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "d35a9331",
+ "outputId": "6af17e28-2bc7-4324-b13d-8a92bfd9508c"
+ },
+ "source": [
+ "dataset = XORDataset(size=200)\n",
+ "print(\"Size of dataset:\", len(dataset))\n",
+ "print(\"Data point 0:\", dataset[0])"
+ ],
+ "id": "d35a9331",
+ "execution_count": null,
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Size of dataset: 200\n",
+ "Data point 0: (tensor([0.9632, 0.1117]), tensor(1))\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "lines_to_next_cell": 2,
+ "papermill": {
+ "duration": 0.053101,
+ "end_time": "2021-09-16T12:32:59.210237",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:59.157136",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "f8eeb814"
+ },
+ "source": [
+ "To better relate to the dataset, we visualize the samples below."
+ ],
+ "id": "f8eeb814"
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:59.324080Z",
+ "iopub.status.busy": "2021-09-16T12:32:59.323610Z",
+ "iopub.status.idle": "2021-09-16T12:32:59.325640Z",
+ "shell.execute_reply": "2021-09-16T12:32:59.325245Z"
+ },
+ "papermill": {
+ "duration": 0.060548,
+ "end_time": "2021-09-16T12:32:59.325755",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:59.265207",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "40b4cbff"
+ },
+ "source": [
+ "def visualize_samples(data, label):\n",
+ " if isinstance(data, torch.Tensor):\n",
+ " data = data.cpu().numpy()\n",
+ " if isinstance(label, torch.Tensor):\n",
+ " label = label.cpu().numpy()\n",
+ " data_0 = data[label == 0]\n",
+ " data_1 = data[label == 1]\n",
+ "\n",
+ " plt.figure(figsize=(4, 4))\n",
+ " plt.scatter(data_0[:, 0], data_0[:, 1], edgecolor=\"#333\", label=\"Class 0\")\n",
+ " plt.scatter(data_1[:, 0], data_1[:, 1], edgecolor=\"#333\", label=\"Class 1\")\n",
+ " plt.title(\"Dataset samples\")\n",
+ " plt.ylabel(r\"$x_2$\")\n",
+ " plt.xlabel(r\"$x_1$\")\n",
+ " plt.legend()"
+ ],
+ "id": "40b4cbff",
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "execution": {
+ "iopub.execute_input": "2021-09-16T12:32:59.448747Z",
+ "iopub.status.busy": "2021-09-16T12:32:59.448284Z",
+ "iopub.status.idle": "2021-09-16T12:32:59.938181Z",
+ "shell.execute_reply": "2021-09-16T12:32:59.938567Z"
+ },
+ "papermill": {
+ "duration": 0.560114,
+ "end_time": "2021-09-16T12:32:59.938710",
+ "exception": false,
+ "start_time": "2021-09-16T12:32:59.378596",
+ "status": "completed"
+ },
+ "tags": [],
+ "id": "44e7f18f",
+ "outputId": "f35672d9-f0e6-43e0-b163-e35e481a29a5"
+ },
+ "source": [
+ "visualize_samples(dataset.data, dataset.label)\n",
+ "plt.show()"
+ ],
+ "id": "44e7f18f",
+ "execution_count": null,
+ "outputs": [
+ {
+ "data": {
+ "application/pdf": "JVBERi0xLjQKJazcIKu6CjEgMCBvYmoKPDwgL1BhZ2VzIDIgMCBSIC9UeXBlIC9DYXRhbG9nID4+CmVuZG9iago4IDAgb2JqCjw8IC9FeHRHU3RhdGUgNCAwIFIgL0ZvbnQgMyAwIFIgL1BhdHRlcm4gNSAwIFIKL1Byb2NTZXQgWyAvUERGIC9UZXh0IC9JbWFnZUIgL0ltYWdlQyAvSW1hZ2VJIF0gL1NoYWRpbmcgNiAwIFIKL1hPYmplY3QgNyAwIFIgPj4KZW5kb2JqCjExIDAgb2JqCjw8IC9Bbm5vdHMgMTAgMCBSIC9Db250ZW50cyA5IDAgUgovR3JvdXAgPDwgL0NTIC9EZXZpY2VSR0IgL1MgL1RyYW5zcGFyZW5jeSAvVHlwZSAvR3JvdXAgPj4KL01lZGlhQm94IFsgMCAwIDI4OC45Nzc0NDU1MTg0IDI3Ny4zMDg3NSBdIC9QYXJlbnQgMiAwIFIgL1Jlc291cmNlcyA4IDAgUgovVHlwZSAvUGFnZSA+PgplbmRvYmoKOSAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDEyIDAgUiA+PgpzdHJlYW0KeJytm0uvHLcRhffzK3ppL26LxTeXVhQbMJCFEyFZBFkIiqxYkJTYsuP8/HyHPfdO91XNSAacIIEvzSaL9Th1qsix5c3pyVe2vP6whOUN//t1seWb5cmzV//94eWrP3/zdHn54RQYf3eKva+jtZwrf77d/xlbW1PorTAcDn/963R6f2J1vviGhV+fTiWuVpLFsqS2lpyYxtKtrOnR6Nv9aCx5HX0bvqywH2Wn708/Ls7yMaY1LtHamvPy06vlb8v75clXcR55tWil9spW/JHr+T/tFNYWSu8xlTiWn14j+xv+9yuTIie5+t3y6LvTj5xe6rNlVLSSem2jJVt6Qnm1pFBKqMvLd8uTP4Xl2b8fZlsqa8tFc3LkL1uthzjKYGNneqzr4IS92AhxubO1ppIt5TC8xe+shDXUGrBeroO/c1hrHGbVsjcfVWLPGizXstwxN6Wc6hi5e7NTWFNGA4kJabGy5h5LLz2V5p202YrQOEyKmM34mu/iqBayd9Q7C2uvvaeYQ6pLwxSlmulrd3bqa0gNPcaaddSSVrNgtTR2c8TJaeWkIYVRel2kqdF6TaOO7s1nA0ypuXmMlKTLujY27K1iRG8HfDSYjd4xUV+s9tVCDy3hT+4GOO5oiCO/2rTbG+qxYa5AZe38+2yZQy95Re2xjpyjaysjTIY8NofaJD0h1Rs2ybh38pcPKWJfYwNifdW3o2Ji17iVsyYkx2NwzJFXonG0kWr0zorqcw8F6UPGsqEi2VAUeILEvjYMma30MJaYV5yX8FKUuCfNeQ094vEjjX4fAzmg+JHdEJRjlhBTbTqqtbqWJjdLPbua6WuPMZpCHOkAQaFgaxbdmJKjt9hHTJG4uDOT3awmmdUVx1AeIYQrELibn+JG3WJwdXlnKAgnbHJc04GxRRYi1Mge3g58wFpWwwhpzB1iiynknq7ACLAxCO0YRgLF2BFn6q0n8G8M1x3S2nNp6IUIn7He0BfgQMB5O2QpkQVNkRuJKyYXAs11iLpG47hoEmGImm5gCg5VfGDIzEi9ICzYdMbANmK2kcz1ZVJLLYAqsSr3IzmlCvS3VvzQApqIogZmE0msHnoGYYsPVKawLiHXjDsS5UD4YKb+9GFqZe3RQA+LSkEFP6i1u4oxHMHAO2Zg16Jj4BUlZveYAzdH7MjJirwGNxsjBDKL65YBH0DnVhVXqD2uHBjrFozthfggHQB8QzEYlb9zIcqLH7NllcxZ2N05pebmyKGvRjjBB5ThsxOJUWIJHW+35lt0rCCR8gfaIcJI0dZIsoEvvCyb1kR6B2FIhUuKzCZmJgb58oDsKKOalKnT4DJ4P/Fdk+sDeHiAEZBsgW6QFnIAxHURho9n97
VWshL6IIXLefPooE/0nRf/IvHgIZFwsFAkWR8s755UwZaETMwiawrmiXEYA5DmT68rzhdAlhL1dzVcMgOULOEeFYGJ1mpiBnlRGkqRPMvn7mHvRIFIs0hhecgvUX/JbaZxHy+BL8hTSKmMwZEraRTK0APZ6iog4BFsQWYem7mAktZQkvuFRMgDHtyTqI0JIHA+HCK4rm+GFjGBidos+J4R6rkhnY+uIgbsXtgiTZIGc2kRLgCv9U9cUBHBBxNlA80HcAbplyTnhSKQQ0YrKAhmLJTIUEixsO6boALHbBAjSahsJoG3WR8Ejasg4k/5h7RsYi5pDeAnkZ+ylx0gloUAr+QSEBXpQohRcNh8cVAP+QCODybrYxPp5gNzU08vIsUs3s1gpYlsSiaJGev5LBZaZ5AiyhmrSu5YFc8pgJ0HDaheaA//INRZPKAjSKnA3M0NsBs0H4Lyp2yLdDLWEFxdIVJQtI5KMiQ5xLXL0AP491GZcCIusvIw9J1Q5ssrsDapCDQRaox3jo05II14WnJJ8t0gVZJPOhAiK0exO2Knuma1tLYSUiZSq3jUSpZrAs3s6UYoj9ZKhOQvglhtQ53pohouLkBGc4iyiIp3mCmHFSX0YY2En2JXGGFjnKDnOl34SvUAfy2imA31qPYApyqFGbp0Y4oNVFUBsLIjahR+Q3b9JE6mVc5WRE8rQcFJKdXFWBmpEqkYSQBIEllLhI7KBVwfC/Jf8Eb5j616qqRwMvOVXGV4DFgGGpTpoKge9IRqdh/MqKOoqyjy4JiiW0H+2TJQcgU92ipSkZhC/En1nFY13mie+KLDgDcADPFeKOJw/c5ho1tnANZjbYq9TvE2tr9TA5gIMDeestYnI8JvKUxEHQbGCJgu+RuoICd7V4Iqb1hYRGIGYeDJP7AmboA9KQo7OAmKYWlzsSaCddP0uDq6RLPUb8L64PN0ajCVzNax55Y7G3kTLyr+fEzJciBrzUsFyAhWknNzXT4SIiAjTgZcUJHBYoDh4vcdZtmiLM5/SQdb9Ytexbtj8HyetKcOCUgUNjupJsGuFMy+avKEYZacxCIqwJvE8QsM3MrUACpEKkECmUWppMve/QLD4HRQAnUn6izDAnal4Metq9/dgCQQUYO0Q5hWGALAAA77rAXCR4VXEH3MGk+gT0KpYr+ez1TZiaq3TtSGzxEiRLwLHpgVB2udbGlboyIloAAk655h0QT+Cz2GrCxiuqRhgO9aPMFdqyW4t+qADQ+Aqa7egFtPqbTDhgU4RYAJxYNSnr9xhd38707fLb+1TaekALmlKMVv1ZsDkoGyIaD8qDd3nLw8mrxryEVlvYp1iCMAVf05uAC1Ws1nfdujKBoUbzWrLSd0jY1aZLB6d2bL1lBt8qIK2Il+jTqa3GP3nvJofo5asRFJKaUJr2ArCRnzJ0+cCr5nOAnBkZSZyAdRPZygzpC7Ax5CUgVTR5MLqDGWaxQH9Q9M8GBMShWAqG78uKECnSp5H9x1gV5KjZJUnL6Y8j6BNNT78Tao60hNjlgmpS9NsTeSnCx7G6TZAgA8SPl5UY2cMAAZLnjTwRYLZJeg9pPsRz4D+YIl32JAOs49O65tNkOA09DVbPMMgDapXChwprDKtrVGbXfP5u0xxM8uXwYQ2uzikctERYenSmKZ2WjOINZgWqagRTUpur4ThSsU0tSOhH5S36yraDBfK2Qjshkcss1+JcxS9SnMO15x5SY94zoNBrFE9WeDOm25eCcFxCqVnJymCK/VZyZ15+JaFOJGgEIHYflDWsftW+LcublxInxOYoO4rvokImgqwMj77gZqCRDoKlo4HCwefWPaFIp71L4WUA9HbJT04CuAqZrRd3fSMMU0BmV+Ua+KMgv4IxN78Ye3ptyHehHszbeQuYqlxvBi407dx8xKlFZJ7hgIb9yn4WP3bfRHeIDsQAeIPbKuAALBSDam9KIc9BEHS2JINe/EOsi06g8g0RU8wM3AMnIxbh631AnjYI
Me+pWIogSHYOeeZy89qqkF2kNbmhdSsaoj2oQ44gZkLEpZm2WUKw8FYFVBh7ePDUGLlB9seBrCuFTbFIspyI2LKgFdeVwJWPVbwSb+dVY3mjJkpYKBE1HTueJQiyjl4IfQgU09KqF0ZZOuGCCpHwQmwCgAKPBKRVunDKjeDnfihWRXHKckhZaqKhH25jsE5hHzaBVCdr4IIaio6bqvn0nNRPuS7vRwH8LE4LsujGztEjJgEkyNzX0wNPotLninMLvJsyU0pYcciN740idiT8SKYIwqGcvoYhYt+J4ADyNSImR8HrVCcnNpaoY0X/UqD0g0oL0sR+pHLHmyuZ6flRk4F7ElxFTg48gsn31NqrEI/KkZl7saizMTUpUqQTgf6A4GQC0AZ9q6jORlEHr4YbWqqIGKxaiuCl4NB8zwUBcwqSY412xx6l5i9s1EttWw9ZkIlQ54Ogq5uwhx1QyouvSBu3hRG3RhlnQPlKbf4D7IXnQN4gG46lbKQDXzJoCDm1kF+UPd+BEmVLEgZZR56ZN0swZsNh83kb4TIORD4ntRI3JEWDdlW/Tk0a0sfqXbTAWt6oWhNg9A4kpPeuuDbFjDvGjVNY/hBsSxqxxllEAdqy6wboXW2bKB02df+qAsgqqpFGuft8Aix6CuL8+dPDMAsbr6mjWDwWiHUoy3PsYpZBExRLkObhfVf+/JzZ3K+5nCiZpX/UpdxouxUDT46VDXZVR2BFUfkwBj69461MIzrO4VhAdjXvWhEMkxhisI4az71FYsb02cjPyjm+9hABPlNsyS+mmcPUwt90a0ukpUNw4HMZVqaiOp11ZhcpBwN3Va62LSkHmSTdq6d4RTU6O++iFLbVNAXtJbV8VOZmuZqIp+CKoza1uuUlGtO5NEEqIQNm95dSoTLD7VyaJX5Vhcx0dW1AcVVSM8NjEFNUZHQJLG//t5X0U3OarJltuVMVmIKoi4unbaJEI6C3WdlvDALUq4Ig/KISp0X637cBMaqujtNbpOTMWeVPmC9Xm7FiRjDj0YcEOWvEdaY8E+CzHyVIYW4Q1XaIL6/mKsseGTbXb5FfFEguv3Jrc3tdq2mw+USxmsW8fhp54V6NA1bikCbyIAVVIbuq4siqM0SA6ftp2tHI7C19X7oKmLhZ0wflQLKuoyND/crh3mdnCywCbFcCbJJ6TIC3i9nxZm7xTH0ZVFnU2o3lQMBtXDjigdrwQ8gFI1//WwgOyWVZq7BJZqUSpOumKqFEAotdnEGzdCupKkGr8G91QrMkUVs747KmngAbqiVOsfnqPmFtF+hdjragBR8ffZSi94A0TEX37yLLydOpBC9vzABNYkh7+iyaoSTc8/SGV5tiKbOu+5tisRm9Xvq9J10JOOAS3GB6JbVefZh4HTRMGjoAYYmW+TvJOSXQO5TKXgJGVDyWZcgQ7doVH1CWqqal6VkmkS4hhcin6X9fhG3p3U+dfzArQ03d93MXIG4EVaaIJJgJz8kPXYxEf6UvEZ0qVum8+lLBmEgVwO1GlrOt0/+lq2lhH1u5pGes61vD7xqRhMzft3abtBsZTtpdnpKQL8em4k3U2Vq3UPdTS9eVqAy1nTn54+X558LQdann9/kr/2aUS+ev7P09+XLyDE5cvlH8vzb09/fH76Ua2q7WQP/6ATfG13z169efHXX/7y4v2Hu3c/vP/lA0daONJpnuA0tDtxFfeS7wZvSd4hzjg2Fqc69gUPO3lDuMh7v/28mM3E+X773eCt7ZWK+rxhbbqX+LQAe4U9CADbp8jVCfYS7EZvilD0Pi3B7vUo6TNEKI4OYjBSWFTU7F817kZvijB0Nz8aPAn0+wwRmqMF6oV1qHo/Pqzcjd4SIcb5UKzpNVL+tAjmegJVO4QxV6Dg8LbzMnpTBEoyShhKJOqm/BkiHH3hYFLo2dZdJgXObuS2kD5ft3/BIk++judVv/jfl8vzNydd4ZLa+rzbRuYZrHMi2WzOszlvd+RLHxvUT2JEOrJ654
9H3SMHvbXTfpmzk16gDNuVz+dgx+8AHRfx+9a/LP0g/mXUFV9F7lRXm3UHTAai128jyMducxFCpWaZ92AHKXbDt8VQ9QSJzHrTMwuJG0ByU45U9dxWdPogx2X4E3LoaVBXt6/E/LE593Lkm3LoTqWrPXSU4zL8CTkIqaEbiqincDflqDflGLbOpyqP7HIZ/oQcfTb29XTCJk+4Lke/JUekPA/BUOpBjt3wbTnifE9GOic13XJTu+2mrAaHV5l7FOMy/AkxMhWF7mNj255u3sC4I8RtLEncB0cfQlI1cVvVP/0+CBcvCBeXb5fwwJOOvwhwX/H7D/NPf3Gf97+79ryf+b/hNwKH2Zdlbq0e5rl214bL60P2gDIDwkWqibpk6w/a3YwUj0Z69uLnFx9e/bx8ePHuP29ffdgb7MlXabu+fP2gyPOvIGIg9LfMpHcdFCN9k7r2e6l3428ZV99JXbD9uFLrbnbTU7p4HI33c1+e9uOcK6d5xLfHccObgC3I4G5H9UPSWSEX+S6jLw/nuYy/PdlQEz+3+QDvYZxC6TL7YcfDaLisvR+/nOftcfzh9Psdd5py9f1Svxp5evrUj0Ee/6jjXD08/iHI6eYPQU5RbauzCGVOPZtcvaEGAc378aiaL82qX6ON1ErFpdEy5v2MbldTpsSc6tdVZ50PwTWuR2Rpg7uHHaOeIukZ6mE03e+3rfEwrqsgdfOXw8qF+k2N5KMcRTWSjW2NndR1ez2pHXcnrJcT7vRR93KATD1ubxp2s/WcSR3748py3drDYzmCSvyslzl7qRl1Trgf3etjP37R3n7lnaZ3chzsspN6Z8XdCYOrj3CRY7poOP+E6hFW6QnD7pueN7e+mlD+8PbFBxx9j1G3n0t83puKg2/rndR47Njnwb1/iEyZ+vgHb2K06pcwj5w66QmjfjOy91L9dGWMNOw42udWR4dO8Ouhy9aDQ+cJfI/iSreiNt9nH9w5mxbj0Ad3ZnQ71+X856GjIz/Mu/jDfsGL9+x337naRdKdV+4OtR99OP9+8KKq3ao7rV4kOFjgIu7eXJdzPdj1I/t/vuNq5VvkdHNa2zvtd6f/A2CYYAEKZW5kc3RyZWFtCmVuZG9iagoxMiAwIG9iago0ODQ0CmVuZG9iagoxMCAwIG9iagpbIF0KZW5kb2JqCjE5IDAgb2JqCjw8IC9GaWx0ZXIgL0ZsYXRlRGVjb2RlIC9MZW5ndGggOTUgPj4Kc3RyZWFtCnicPYxBDsAgCATvvGI/0AQRFf/TND3Y/1+7RtsLTHZhSjcoDiucVRXFG84kHz6SvcNax5CimUdDnN3cFg5LjRSrWBYWnmERpLQ1zPi8KGtgSinqaWf1v7vlegH/nxwsCmVuZHN0cmVhbQplbmRvYmoKMTcgMCBvYmoKPDwgL0Jhc2VGb250IC9EZWphVnVTYW5zLU9ibGlxdWUgL0NoYXJQcm9jcyAxOCAwIFIKL0VuY29kaW5nIDw8IC9EaWZmZXJlbmNlcyBbIDEyMCAveCBdIC9UeXBlIC9FbmNvZGluZyA+PiAvRmlyc3RDaGFyIDAKL0ZvbnRCQm94IFsgLTEwMTYgLTM1MSAxNjYwIDEwNjggXSAvRm9udERlc2NyaXB0b3IgMTYgMCBSCi9Gb250TWF0cml4IFsgMC4wMDEgMCAwIDAuMDAxIDAgMCBdIC9MYXN0Q2hhciAyNTUgL05hbWUgL0RlamFWdVNhbnMtT2JsaXF1ZQovU3VidHlwZSAvVHlwZTMgL1R5cGUgL0ZvbnQgL1dpZHRocyAxNSAwIFIgPj4KZW5kb2JqCjE2IDAgb2JqCjw8IC9Bc2NlbnQgOTI5IC9DYXBIZWlnaHQgMCAvRGVzY2VudCAtMjM2IC9GbGFncyA5NgovRm9udEJCb3ggWyAtMTAxNiAtMzUxIDE2NjAgMTA2OCBdIC9Gb250TmFtZSAvRGVqYVZ1U2Fucy1PYmxpcXVlCi9JdGFsaWNBbm
dsZSAwIC9NYXhXaWR0aCAxMzUwIC9TdGVtViAwIC9UeXBlIC9Gb250RGVzY3JpcHRvciAvWEhlaWdodCAwID4+CmVuZG9iagoxNSAwIG9iagpbIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwCjYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgMzE4IDQwMSA0NjAgODM4IDYzNgo5NTAgNzgwIDI3NSAzOTAgMzkwIDUwMCA4MzggMzE4IDM2MSAzMTggMzM3IDYzNiA2MzYgNjM2IDYzNiA2MzYgNjM2IDYzNiA2MzYKNjM2IDYzNiAzMzcgMzM3IDgzOCA4MzggODM4IDUzMSAxMDAwIDY4NCA2ODYgNjk4IDc3MCA2MzIgNTc1IDc3NSA3NTIgMjk1CjI5NSA2NTYgNTU3IDg2MyA3NDggNzg3IDYwMyA3ODcgNjk1IDYzNSA2MTEgNzMyIDY4NCA5ODkgNjg1IDYxMSA2ODUgMzkwIDMzNwozOTAgODM4IDUwMCA1MDAgNjEzIDYzNSA1NTAgNjM1IDYxNSAzNTIgNjM1IDYzNCAyNzggMjc4IDU3OSAyNzggOTc0IDYzNCA2MTIKNjM1IDYzNSA0MTEgNTIxIDM5MiA2MzQgNTkyIDgxOCA1OTIgNTkyIDUyNSA2MzYgMzM3IDYzNiA4MzggNjAwIDYzNiA2MDAgMzE4CjM1MiA1MTggMTAwMCA1MDAgNTAwIDUwMCAxMzUwIDYzNSA0MDAgMTA3MCA2MDAgNjg1IDYwMCA2MDAgMzE4IDMxOCA1MTggNTE4CjU5MCA1MDAgMTAwMCA1MDAgMTAwMCA1MjEgNDAwIDEwMjggNjAwIDUyNSA2MTEgMzE4IDQwMSA2MzYgNjM2IDYzNiA2MzYgMzM3CjUwMCA1MDAgMTAwMCA0NzEgNjE3IDgzOCAzNjEgMTAwMCA1MDAgNTAwIDgzOCA0MDEgNDAxIDUwMCA2MzYgNjM2IDMxOCA1MDAKNDAxIDQ3MSA2MTcgOTY5IDk2OSA5NjkgNTMxIDY4NCA2ODQgNjg0IDY4NCA2ODQgNjg0IDk3NCA2OTggNjMyIDYzMiA2MzIgNjMyCjI5NSAyOTUgMjk1IDI5NSA3NzUgNzQ4IDc4NyA3ODcgNzg3IDc4NyA3ODcgODM4IDc4NyA3MzIgNzMyIDczMiA3MzIgNjExIDYwOAo2MzAgNjEzIDYxMyA2MTMgNjEzIDYxMyA2MTMgOTk1IDU1MCA2MTUgNjE1IDYxNSA2MTUgMjc4IDI3OCAyNzggMjc4IDYxMiA2MzQKNjEyIDYxMiA2MTIgNjEyIDYxMiA4MzggNjEyIDYzNCA2MzQgNjM0IDYzNCA1OTIgNjM1IDU5MiBdCmVuZG9iagoxOCAwIG9iago8PCAveCAxOSAwIFIgPj4KZW5kb2JqCjI0IDAgb2JqCjw8IC9GaWx0ZXIgL0ZsYXRlRGVjb2RlIC9MZW5ndGggMjM1ID4+CnN0cmVhbQp4nDVRSW4AMQi75xX+QKWwJ++Zquqh/f+1hlEvAwPY2CTvwUYkPsSQ7ihXfMrqNMvwO1nkxc9K4eS9iAqkKsIKaQfPclYzDJ4bmQKXM/FZZj6ZFjsWUE3EcXbkNINBiGlcR8vpMNM86Am5PhhxY6dZrmJI691Svb7X8p8qykfW3Sy3TtnUSt2iZ+xJXHZeT21pXxh1FDcFkQ4fO7wH+SLmLC46kW72mymHlaQhOC2AH4mhVM8OrxEmfmYkeMqeTu+jNLz2QdP1vXtBR24mZCq3UEYqnqw0xoyh+o1oJqnv/4Ge9b2+/gBDTVS5CmVuZHN0cmVhbQplbmRvYmoKMjUgMCBvYmoKPDwgL0ZpbHRlciAvRmxhdGVEZWNvZGUgL0
xlbmd0aCAxNjQgPj4Kc3RyZWFtCnicPZDBEUMhCETvVrElgIBAPclkcvi//2tAk1xkHWD3qTuBkFGHM8Nn4smD07E0cG8VjGsIryP0CE0Ck8DEwZp4DAsBp2GRYy7fVZZVp5Wumo2e171jQdVplzUNbdqB8q2PP8I13qPwGuweQgexKHRuZVoLmVg8a5w7zKPM535O23c9GK2m1Kw3ctnXPTrL1FBeWvuEzmi0/SfXL7sxXh+FFDkICmVuZHN0cmVhbQplbmRvYmoKMjYgMCBvYmoKPDwgL0ZpbHRlciAvRmxhdGVEZWNvZGUgL0xlbmd0aCAzMDcgPj4Kc3RyZWFtCnicPZJLbgMxDEP3PoUuEMD62Z7zpCi6mN5/2ycl6Yoc2RZFapa6TFlTHpA0k4R/6fBwsZ3yO2zPZmbgWqKXieWU59AVYu6ifNnMRl1ZJ8XqhGY6t+hRORcHNk2qn6sspd0ueA7XJp5b9hE/vNCgHtQ1Lgk3dFejZSk0Y6r7f9J7/Iwy4GpMXWxSq3sfPF5EVejoB0eJImOXF+fjQQnpSsJoWoiVd0UDQe7ytMp7Ce7b3mrIsgepmM47KWaw63RSLm4XhyEeyPKo8OWj2GtCz/iwKyX0SNiGM3In7mjG5tTI4pD+3o0ES4+uaCHz4K9u1i5gvFM6RWJkTnKsaYtVTvdQFNO5w70MEPVsRUMpc5HV6l/DzgtrlmwWeEr6BR6j3SZLDlbZ26hO76082dD3H1rXdB8KZW5kc3RyZWFtCmVuZG9iagoyNyAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDI0OSA+PgpzdHJlYW0KeJw9UDuORCEM6zmFL/Ak8iNwHkarLWbv364DmilQTH62MyTQEYFHDDGUr+MlraCugb+LQvFu4uuDwiCrQ1IgznoPiHTspjaREzodnDM/YTdjjsBFMQac6XSmPQcmOfvCCoRzG2XsVkgniaoijuozjimeKnufeBYs7cg2WyeSPeQg4VJSicmln5TKP23KlAo6ZtEELBK54GQTTTjLu0lSjBmUMuoepnYifaw8yKM66GRNzqwjmdnTT9uZ+Bxwt1/aZE6Vx3QezPictM6DORW69+OJNgdNjdro7PcTaSovUrsdWp1+dRKV3RjnGBKXZ38Z32T/+Qf+h1oiCmVuZHN0cmVhbQplbmRvYmoKMjggMCBvYmoKPDwgL0ZpbHRlciAvRmxhdGVEZWNvZGUgL0xlbmd0aCAzOTUgPj4Kc3RyZWFtCnicPVJLbsVACNvnFFyg0vCbz3lSVd28+29rQ1KpKryJMcYwfcqQueVLXRJxhcm3Xq5bPKZ8LltamXmIu4uNJT623JfuIbZddC6xOB1H8gsynSpEqM2q0aH4QpaFB5BO8KELwn05/uMvgMHXsA244T0yQbAk5ilCxm5RGZoSQRFh55EVqKRQn1nC31Hu6/cyBWpvjKULYxz0CbQFQm1IxALqQABE7JRUrZCOZyQTvxXdZ2IcYOfRsgGuGVRElnvsx4ipzqiMvETEPk9N+iiWTC1Wxm5TGV/8lIzUfHQFKqk08pTy0FWz0AtYiXkS9jn8SPjn1mwhhjpu1vKJ5R8zxTISzmBLOWChl+NH4NtZdRGuHbm4znSBH5XWcEy0637I9U/+dNtazXW8cgiiQOVNQfC7Dq5GscTEMj6djSl6oiywGpq8RjPBYRAR1vfDyAMa/XK8EDSnayK0WCKbtWJEjYpscz29BNZM78U51sMTwmzvndahsjMzKiGC2rqGautAdrO+83C2nz8z6KJtCmVuZHN0cmVhbQplbmRvYmoKMjkgMCBvYmoKPDwgL0ZpbHRlciAvRmxhdGVEZWNvZGUgL0xlbmd0aCAyNDkgPj4Kc3RyZWFtCnicTVFJigMwDLvnFfpAIV6TvKdDmUPn/9fKDoU5BAmvkpOWmFgLDzGEHyw9+JEhczf9G36i2btZepLJ2f+Y5yJTUfhSqC5iQl
2IG8+hEfA9oWsSWbG98Tkso5lzvgcfhbgEM6EBY31JMrmo5pUhE04MdRwOWqTCuGtiw+Ja0TyN3G77RmZlJoQNj2RC3BiAiCDrArIYLJQ2NhMyWc4D7Q3JDVpg16kbUYuCK5TWCXSiVsSqzOCz5tZ2N0Mt8uCoffH6aFaXYIXRS/VYeF+FPpipmXbukkJ64U07IsweCqQyOy0rtXvE6m6B+j/LUvD9yff4Ha8PzfxcnAplbmRzdHJlYW0KZW5kb2JqCjMwIDAgb2JqCjw8IC9GaWx0ZXIgL0ZsYXRlRGVjb2RlIC9MZW5ndGggOTQgPj4Kc3RyZWFtCnicRY3BEcAgCAT/VEEJCgraTyaTh/b/jRAyfGDnDu6EBQu2eUYfBZUmXhVYB0pj3FCPQL3hci3J3AUPcCd/2tBUnJbTd2mRSVUp3KQSef8OZyaQqHnRY533C2P7IzwKZW5kc3RyZWFtCmVuZG9iagozMSAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDQ3ID4+CnN0cmVhbQp4nDMyt1AwULA0ARKGFiYK5mYGCimGXJYQVi4XTCwHzALRlnAKIp7BlQYAuWcNJwplbmRzdHJlYW0KZW5kb2JqCjMyIDAgb2JqCjw8IC9GaWx0ZXIgL0ZsYXRlRGVjb2RlIC9MZW5ndGggMjU4ID4+CnN0cmVhbQp4nEWRS3IEIAhE956CI4D85DyTSmUxuf82Dc5kNnaXqP2ESiOmEiznFHkwfcnyzWS26Xc5VjsbBRRFKJjJVeixAqs7U8SZa4lq62Nl5LjTOwbFG85dOalkcaOMdVR1KnBMz5X1Ud35dlmUfUcOZQrYrHMcbODKbcMYJ0abre4O94kgTydTR8XtINnwByeNfZWrK3CdbPbRSzAOBP1CE5jki0DrDIHGzVP05BLs4+N254Fgb3kRSNkQyJEhGB2Cdp1c/+LW+b3/cYY7z7UZrhzv4neY1nbHX2KSFXMBi9wpqOdrLlrXGTrekzPH5Kb7hs65YJe7g0zv+T/Wz/r+Ax4pZvoKZW5kc3RyZWFtCmVuZG9iagozMyAwIG9iago8PCAvQkJveCBbIC0xMDIxIC00NjMgMTc5NCAxMjMzIF0gL0ZpbHRlciAvRmxhdGVEZWNvZGUgL0xlbmd0aCAzOQovU3VidHlwZSAvRm9ybSAvVHlwZSAvWE9iamVjdCA+PgpzdHJlYW0KeJzjMjQwUzA2NVXI5TI3NgKzcsAsI3MjIAski2BBZDO40gAV8wp8CmVuZHN0cmVhbQplbmRvYmoKMzQgMCBvYmoKPDwgL0ZpbHRlciAvRmxhdGVEZWNvZGUgL0xlbmd0aCA4MyA+PgpzdHJlYW0KeJxFjLsNwDAIRHumYAR+JvY+UZTC3r8NECVuuCfdPVwdCZkpbjPDQwaeDCyGXXGB9JYwC1xHUI6d7KNh1b7qBI31plLz7w+Unuys4obrAQJCGmYKZW5kc3RyZWFtCmVuZG9iagozNSAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDIzOSA+PgpzdHJlYW0KeJxNUMltBDEM+7sKNTDA6By7HgeLPLL9f0PKCZKXaEviofKUW5bKZfcjOW/JuuVDh06VafJu0M2vsf6jDAJ2/1BUEK0lsUrMXNJusTRJL9nDOI2Xa7WO56l7hFmjePDj2NMpgek9MsFms705MKs9zg6QTrjGr+rTO5UkA4m6kPNCpQrrHtQloo8r25hSnU4t5RiXn+h7fI4APcXejdzRx8sXjEa1LajRapU4DzATU9GVcauRgZQTBkNnR1c0C6XIynpCNcKNOaGZvcNwYAPLs4Skpa1SvA9lAegCXdo64zRKgo4Awt8ojPX6Bqr8XjcKZW5kc3RyZWFtCmVuZG9iagozNiAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDUxID4+CnN0cm
VhbQp4nDM2tFAwUDA0MAeSRoZAlpGJQoohF0gAxMzlggnmgFkGQBqiOAeuJocrgysNAOG0DZgKZW5kc3RyZWFtCmVuZG9iagozNyAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDMzNCA+PgpzdHJlYW0KeJwtUktyxSAM23MKXaAz+AfkPOl0uni9/7aSk0VGDmD0MeWGiUp8WSC3o9bEt43MQIXhr6vMhc9I28g6iMuQi7iSLYV7RCzkMcQ8xILvq/EeHvmszMmzB8Yv2XcPK/bUhGUh48UZ2mEVx2EV5FiwdSGqe3hTpMOpJNjji/8+xXMtBC18RtCAX+Sfr47g+ZIWafeYbdOuerBMO6qksBxsT3NeJl9aZ7k6Hs8Hyfau2BFSuwIUhbkzznPhKNNWRrQWdjZIalxsb479WErQhW5cRoojkJ+pIjygpMnMJgrij5wecioDYeqarnRyG1Vxp57MNZuLtzNJZuu+SLGZwnldOLP+DFNmtXknz3Ki1KkI77FnS9DQOa6evZZZaHSbE7ykhM/GTk9Ovlcz6yE5FQmpYlpXwWkUmWIJ2xJfU1FTmnoZ/vvy7vE7fv4BLHN8cwplbmRzdHJlYW0KZW5kb2JqCjM4IDAgb2JqCjw8IC9GaWx0ZXIgL0ZsYXRlRGVjb2RlIC9MZW5ndGggNzAgPj4Kc3RyZWFtCnicMzM2UzBQsDACEqamhgrmRpYKKYZcQD6IlcsFE8sBs8wszIEsIwuQlhwuQwtjMG1ibKRgZmIGZFkgMSC6MrjSAJiaEwMKZW5kc3RyZWFtCmVuZG9iagozOSAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDMyMCA+PgpzdHJlYW0KeJw1UktuBTEI288puECl8E/O86qqi777b2sTvRVMMGDjKS9Z0ku+1CXbpcPkWx/3JbFC3o/tmsxSxfcWsxTPLa9HzxG3LQoEURM9WJkvFSLUz/ToOqhwSp+BVwi3FBu8g0kAg2r4Bx6lMyBQ50DGu2IyUgOCJNhzaXEIiXImiX+kvJ7fJ62kofQ9WZnL35NLpdAdTU7oAcXKxUmgXUn5oJmYSkSSl+t9sUL0hsCSPD5HMcmA7DaJbaIFJucepSXMxBQ6sMcCvGaa1VXoYMIehymMVwuzqB5s8lsTlaQdreMZ2TDeyzBTYqHhsAXU5mJlgu7l4zWvwojtUZNdw3Duls13CNFo/hsWyuBjFZKAR6exEg1pOMCIwJ5eOMVe8xM5DsCIY52aLAxjaCaneo6JwNCes6VhxsceWvXzD1TpfIcKZW5kc3RyZWFtCmVuZG9iago0MCAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDE4ID4+CnN0cmVhbQp4nDM2tFAwgMMUQ640AB3mA1IKZW5kc3RyZWFtCmVuZG9iago0MSAwIG9iago8PCAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDEzMyA+PgpzdHJlYW0KeJxFj0sOBCEIRPecoo7Axx/ncTLphXP/7YCdbhNjPYVUgbmCoT0uawOdFR8hGbbxt6mWjkVZPlR6UlYPyeCHrMbLIdygLPCCSSqGIVCLmBqRLWVut4DbNg2yspVTpY6wi6Mwj/a0bBUeX6JbInWSP4PEKi/c47odyKXWu96ii75/pAExCQplbmRzdHJlYW0KZW5kb2JqCjQyIDAgb2JqCjw8IC9GaWx0ZXIgL0ZsYXRlRGVjb2RlIC9MZW5ndGggMjUxID4+CnN0cmVhbQp4nC1RSXIDQQi7zyv0hGan32OXK4fk/9cIygcGDYtAdFrioIyfICxXvOWRq2jD3zMxgt8Fh34r121Y5EBUIEljUDWhdvF69B7YcZgJzJPWsAxmrA/8jCnc6MXhMRlnt9dl1BDsXa89mUHJrFzEJRMXTNVhI2cOP5kyLrRzPTcg50ZYl2GQbl
YaMxKONIVIIYWqm6TOBEESjK5GjTZyFPulL490hlWNqDHscy1tX89NOGvQ7Fis8uSUHl1xLicXL6wc9PU2AxdRaazyQEjA/W4P9XOyk994S+fOFtPje83J8sJUYMWb125ANtXi37yI4/uMr+fn+fwDX2BbiAplbmRzdHJlYW0KZW5kb2JqCjQzIDAgb2JqCjw8IC9GaWx0ZXIgL0ZsYXRlRGVjb2RlIC9MZW5ndGggMjE1ID4+CnN0cmVhbQp4nDVROQ4DIQzs9xX+QCSML3hPoijN/r/NjNFWHsFchrSUIZnyUpOoIeVTPnqZLpy63NfMajTnlrQtc4C4trwvrZLAiWaIg8FpmLgBmjwBQ9fRqFFDFx7Q1KVTKLDcBD6Kt24P3WO1gZe2IeeJIGIoGSxBzalFExZtzyekNb9eixvel+3dyFOlxpYYgQYBVjgc1+jX8JU9TybRdBUy1Ks1yxgJE0UiPPmOptUT61o00jIS1MYRrGoDvDv9ME4AABNxywJkn0qUs+TEb7H0swZX+v4Bn0dUlgplbmRzdHJlYW0KZW5kb2JqCjIyIDAgb2JqCjw8IC9CYXNlRm9udCAvRGVqYVZ1U2FucyAvQ2hhclByb2NzIDIzIDAgUgovRW5jb2RpbmcgPDwKL0RpZmZlcmVuY2VzIFsgMzIgL3NwYWNlIDQ2IC9wZXJpb2QgNDggL3plcm8gL29uZSAvdHdvIDUyIC9mb3VyIC9maXZlIC9zaXggL3NldmVuCi9laWdodCA2NyAvQyAvRCA5NyAvYSAxMDEgL2UgMTA4IC9sIC9tIDExMiAvcCAxMTUgL3MgL3QgXQovVHlwZSAvRW5jb2RpbmcgPj4KL0ZpcnN0Q2hhciAwIC9Gb250QkJveCBbIC0xMDIxIC00NjMgMTc5NCAxMjMzIF0gL0ZvbnREZXNjcmlwdG9yIDIxIDAgUgovRm9udE1hdHJpeCBbIDAuMDAxIDAgMCAwLjAwMSAwIDAgXSAvTGFzdENoYXIgMjU1IC9OYW1lIC9EZWphVnVTYW5zCi9TdWJ0eXBlIC9UeXBlMyAvVHlwZSAvRm9udCAvV2lkdGhzIDIwIDAgUiA+PgplbmRvYmoKMjEgMCBvYmoKPDwgL0FzY2VudCA5MjkgL0NhcEhlaWdodCAwIC9EZXNjZW50IC0yMzYgL0ZsYWdzIDMyCi9Gb250QkJveCBbIC0xMDIxIC00NjMgMTc5NCAxMjMzIF0gL0ZvbnROYW1lIC9EZWphVnVTYW5zIC9JdGFsaWNBbmdsZSAwCi9NYXhXaWR0aCAxMzQyIC9TdGVtViAwIC9UeXBlIC9Gb250RGVzY3JpcHRvciAvWEhlaWdodCAwID4+CmVuZG9iagoyMCAwIG9iagpbIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwCjYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgNjAwIDYwMCA2MDAgMzE4IDQwMSA0NjAgODM4IDYzNgo5NTAgNzgwIDI3NSAzOTAgMzkwIDUwMCA4MzggMzE4IDM2MSAzMTggMzM3IDYzNiA2MzYgNjM2IDYzNiA2MzYgNjM2IDYzNiA2MzYKNjM2IDYzNiAzMzcgMzM3IDgzOCA4MzggODM4IDUzMSAxMDAwIDY4NCA2ODYgNjk4IDc3MCA2MzIgNTc1IDc3NSA3NTIgMjk1CjI5NSA2NTYgNTU3IDg2MyA3NDggNzg3IDYwMyA3ODcgNjk1IDYzNSA2MTEgNzMyIDY4NCA5ODkgNjg1IDYxMSA2ODUgMzkwIDMzNwozOTAgODM4IDUwMCA1MDAgNjEzIDYzNSA1NTAgNjM1IDYxNSAzNTIgNjM1IDYzNCAyNzggMjc4IDU3OSAyNzggOTc0IDYzNCA2MTIKNj
M1IDYzNSA0MTEgNTIxIDM5MiA2MzQgNTkyIDgxOCA1OTIgNTkyIDUyNSA2MzYgMzM3IDYzNiA4MzggNjAwIDYzNiA2MDAgMzE4CjM1MiA1MTggMTAwMCA1MDAgNTAwIDUwMCAxMzQyIDYzNSA0MDAgMTA3MCA2MDAgNjg1IDYwMCA2MDAgMzE4IDMxOCA1MTggNTE4CjU5MCA1MDAgMTAwMCA1MDAgMTAwMCA1MjEgNDAwIDEwMjMgNjAwIDUyNSA2MTEgMzE4IDQwMSA2MzYgNjM2IDYzNiA2MzYgMzM3CjUwMCA1MDAgMTAwMCA0NzEgNjEyIDgzOCAzNjEgMTAwMCA1MDAgNTAwIDgzOCA0MDEgNDAxIDUwMCA2MzYgNjM2IDMxOCA1MDAKNDAxIDQ3MSA2MTIgOTY5IDk2OSA5NjkgNTMxIDY4NCA2ODQgNjg0IDY4NCA2ODQgNjg0IDk3NCA2OTggNjMyIDYzMiA2MzIgNjMyCjI5NSAyOTUgMjk1IDI5NSA3NzUgNzQ4IDc4NyA3ODcgNzg3IDc4NyA3ODcgODM4IDc4NyA3MzIgNzMyIDczMiA3MzIgNjExIDYwNQo2MzAgNjEzIDYxMyA2MTMgNjEzIDYxMyA2MTMgOTgyIDU1MCA2MTUgNjE1IDYxNSA2MTUgMjc4IDI3OCAyNzggMjc4IDYxMiA2MzQKNjEyIDYxMiA2MTIgNjEyIDYxMiA4MzggNjEyIDYzNCA2MzQgNjM0IDYzNCA1OTIgNjM1IDU5MiBdCmVuZG9iagoyMyAwIG9iago8PCAvQyAyNCAwIFIgL0QgMjUgMCBSIC9hIDI2IDAgUiAvZSAyNyAwIFIgL2VpZ2h0IDI4IDAgUiAvZml2ZSAyOSAwIFIKL2ZvdXIgMzAgMCBSIC9sIDMxIDAgUiAvbSAzMiAwIFIgL29uZSAzNCAwIFIgL3AgMzUgMCBSIC9wZXJpb2QgMzYgMCBSCi9zIDM3IDAgUiAvc2V2ZW4gMzggMCBSIC9zaXggMzkgMCBSIC9zcGFjZSA0MCAwIFIgL3QgNDEgMCBSIC90d28gNDIgMCBSCi96ZXJvIDQzIDAgUiA+PgplbmRvYmoKMyAwIG9iago8PCAvRjEgMjIgMCBSIC9GMiAxNyAwIFIgPj4KZW5kb2JqCjQgMCBvYmoKPDwgL0ExIDw8IC9DQSAwIC9UeXBlIC9FeHRHU3RhdGUgL2NhIDEgPj4KL0EyIDw8IC9DQSAxIC9UeXBlIC9FeHRHU3RhdGUgL2NhIDEgPj4KL0EzIDw8IC9DQSAwLjggL1R5cGUgL0V4dEdTdGF0ZSAvY2EgMC44ID4+ID4+CmVuZG9iago1IDAgb2JqCjw8ID4+CmVuZG9iago2IDAgb2JqCjw8ID4+CmVuZG9iago3IDAgb2JqCjw8IC9GMS1EZWphVnVTYW5zLW1pbnVzIDMzIDAgUiAvTTAgMTMgMCBSIC9NMSAxNCAwIFIgPj4KZW5kb2JqCjEzIDAgb2JqCjw8IC9CQm94IFsgLTggLTggOCA4IF0gL0ZpbHRlciAvRmxhdGVEZWNvZGUgL0xlbmd0aCAxMzEgL1N1YnR5cGUgL0Zvcm0KL1R5cGUgL1hPYmplY3QgPj4Kc3RyZWFtCnicbZBBDoQgDEX3PUUv8ElLRWXr0mu4mUzi/bcDcUBM3TTQvjx+Uf6S8E6lwPgkCUtOs+R605DSukyMGObVsijHoFEt1s51OKjP0HBjdIuxFKbU1uh4o5vpNt6TP/qwWSFGPxwOr4R7FkMmXCkxBoffCy/bw/8Rnl7UwB+ijX5jWkP9CmVuZHN0cmVhbQplbmRvYmoKMTQgMCBvYmoKPDwgL0JCb3ggWyAtOCAtOCA4IDggXSAvRmlsdGVyIC9GbGF0ZURlY29kZSAvTGVuZ3RoIDEzMSAvU3VidHlwZSAvRm9ybQovVHlwZSAvWE9iamVjdCA+PgpzdHJlYW0KeJxtkEEOhC
AMRfc9RS/wSUtFZevSa7iZTOL9twNxQEzdNNC+PH5R/pLwTqXA+CQJS06z5HrTkNK6TIwY5tWyKMegUS3WznU4qM/QcGN0i7EUptTW6Hijm+k23pM/+rBZIUY/HA6vhHsWQyZcKTEGh98LL9vD/xGeXtTAH6KNfmNaQ/0KZW5kc3RyZWFtCmVuZG9iagoyIDAgb2JqCjw8IC9Db3VudCAxIC9LaWRzIFsgMTEgMCBSIF0gL1R5cGUgL1BhZ2VzID4+CmVuZG9iago0NCAwIG9iago8PCAvQ3JlYXRpb25EYXRlIChEOjIwMjEwOTE2MTQzMjU5KzAyJzAwJykKL0NyZWF0b3IgKE1hdHBsb3RsaWIgdjMuNC4zLCBodHRwczovL21hdHBsb3RsaWIub3JnKQovUHJvZHVjZXIgKE1hdHBsb3RsaWIgcGRmIGJhY2tlbmQgdjMuNC4zKSA+PgplbmRvYmoKeHJlZgowIDQ1CjAwMDAwMDAwMDAgNjU1MzUgZiAKMDAwMDAwMDAxNiAwMDAwMCBuIAowMDAwMDE1MTI1IDAwMDAwIG4gCjAwMDAwMTQzMTkgMDAwMDAgbiAKMDAwMDAxNDM2MiAwMDAwMCBuIAowMDAwMDE0NTA0IDAwMDAwIG4gCjAwMDAwMTQ1MjUgMDAwMDAgbiAKMDAwMDAxNDU0NiAwMDAwMCBuIAowMDAwMDAwMDY1IDAwMDAwIG4gCjAwMDAwMDA0MDUgMDAwMDAgbiAKMDAwMDAwNTM0NSAwMDAwMCBuIAowMDAwMDAwMjA4IDAwMDAwIG4gCjAwMDAwMDUzMjQgMDAwMDAgbiAKMDAwMDAxNDYxNyAwMDAwMCBuIAowMDAwMDE0ODcxIDAwMDAwIG4gCjAwMDAwMDYwNTYgMDAwMDAgbiAKMDAwMDAwNTg0OCAwMDAwMCBuIAowMDAwMDA1NTMyIDAwMDAwIG4gCjAwMDAwMDcxMDkgMDAwMDAgbiAKMDAwMDAwNTM2NSAwMDAwMCBuIAowMDAwMDEzMDIyIDAwMDAwIG4gCjAwMDAwMTI4MjIgMDAwMDAgbiAKMDAwMDAxMjQwNiAwMDAwMCBuIAowMDAwMDE0MDc1IDAwMDAwIG4gCjAwMDAwMDcxNDEgMDAwMDAgbiAKMDAwMDAwNzQ0OSAwMDAwMCBuIAowMDAwMDA3Njg2IDAwMDAwIG4gCjAwMDAwMDgwNjYgMDAwMDAgbiAKMDAwMDAwODM4OCAwMDAwMCBuIAowMDAwMDA4ODU2IDAwMDAwIG4gCjAwMDAwMDkxNzggMDAwMDAgbiAKMDAwMDAwOTM0NCAwMDAwMCBuIAowMDAwMDA5NDYzIDAwMDAwIG4gCjAwMDAwMDk3OTQgMDAwMDAgbiAKMDAwMDAwOTk2NiAwMDAwMCBuIAowMDAwMDEwMTIxIDAwMDAwIG4gCjAwMDAwMTA0MzMgMDAwMDAgbiAKMDAwMDAxMDU1NiAwMDAwMCBuIAowMDAwMDEwOTYzIDAwMDAwIG4gCjAwMDAwMTExMDUgMDAwMDAgbiAKMDAwMDAxMTQ5OCAwMDAwMCBuIAowMDAwMDExNTg4IDAwMDAwIG4gCjAwMDAwMTE3OTQgMDAwMDAgbiAKMDAwMDAxMjExOCAwMDAwMCBuIAowMDAwMDE1MTg1IDAwMDAwIG4gCnRyYWlsZXIKPDwgL0luZm8gNDQgMCBSIC9Sb290IDEgMCBSIC9TaXplIDQ1ID4+CnN0YXJ0eHJlZgoxNTM0MgolJUVPRgo=\n",
+ "image/svg+xml": [
+ "\n",
+ "\n",
+ "\n"
+ ],
+ "text/plain": [
+ "