"""
Defining a Neural Network in PyTorch
====================================
Deep learning uses artificial neural networks (models), which are
computing systems that are composed of many layers of interconnected
units. By passing data through these interconnected units, a neural
network is able to learn how to approximate the computations required to
transform inputs into outputs. In PyTorch, neural networks can be
constructed using the ``torch.nn`` package.

Introduction
------------
PyTorch provides elegantly designed modules and classes, including
``torch.nn``, to help you create and train neural networks. An
``nn.Module`` contains layers, and a method ``forward(input)`` that
returns the ``output``.
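
For example, a minimal ``nn.Module`` might look like this (a sketch for
illustration only; ``TinyModel`` and its layer sizes are hypothetical,
not part of this recipe):

::

    import torch.nn as nn

    class TinyModel(nn.Module):
        def __init__(self):
            super(TinyModel, self).__init__()
            # A single fully connected layer
            self.fc = nn.Linear(4, 2)

        def forward(self, input):
            # forward defines how input becomes output
            return self.fc(input)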

In this recipe, we will use ``torch.nn`` to define a neural network
intended for the `MNIST
dataset <https://pytorch.org/docs/stable/torchvision/datasets.html#mnist>`__.

Setup
-----
Before we begin, we need to install ``torch`` if it isn’t already
available.

::

   pip install torch


"""


######################################################################
# Steps
# -----
#
# 1. Import all necessary libraries for loading our data
# 2. Define and initialize the neural network
# 3. Specify how data will pass through your model
# 4. [Optional] Pass data through your model to test
#
# 1. Import necessary libraries for loading our data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For this recipe, we will use ``torch`` and its submodules ``torch.nn``
# and ``torch.nn.functional``.
#

import torch
import torch.nn as nn
import torch.nn.functional as F


######################################################################
# 2. Define and initialize the neural network
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Our network will recognize images. We will use a process built into
# PyTorch called convolution. Convolution adds each element of an image to
# its local neighbors, weighted by a kernel, or a small matrix, that
# helps us extract certain features (like edge detection, sharpness,
# blurriness, etc.) from the input image.
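#
# As a quick illustration (a minimal sketch reusing the imports from step
# 1; the input values and the edge-detection kernel are illustrative
# assumptions, not part of the recipe), here is a single convolution
# applied with ``torch.nn.functional.conv2d``:

# A batch of one single-channel 4x4 "image" (values are arbitrary)
image = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
# A 3x3 edge-detection kernel, shaped (out_channels, in_channels, H, W)
kernel = torch.tensor([[[[-1., -1., -1.],
                         [-1.,  8., -1.],
                         [-1., -1., -1.]]]])
# Each output element is the kernel-weighted sum of a 3x3 neighborhood
print(F.conv2d(image, kernel))  # output shape: (1, 1, 2, 2)


######################################################################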
# There are two requirements for defining the ``Net`` class of your model.
# The first is writing an ``__init__`` function that references
# ``nn.Module``. This function is where you define the layers of your
# neural network. The second is writing a ``forward`` function, which we
# cover in step 3.
#
# Using convolution, we will define our model to take 1 input image
# channel, and to output 10 labels representing the numbers 0 through 9.
# This network is yours to design; here we will follow a standard MNIST
# architecture.
#

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()

        # First 2D convolutional layer, taking in 1 input channel (image),
        # outputting 32 convolutional features, with a square kernel size of 3
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        # Second 2D convolutional layer, taking in the 32 input channels,
        # outputting 64 convolutional features, with a square kernel size of 3
        self.conv2 = nn.Conv2d(32, 64, 3, 1)

        # Dropout randomly zeroes out part of its input during training, with
        # the given probability, to help prevent overfitting. Dropout2d zeroes
        # out entire channels (2D feature maps) at a time, so adjacent pixels
        # are either all zeroed or all kept.
        self.dropout1 = nn.Dropout2d(0.25)
        # Plain Dropout is used here because this layer is applied to the
        # flat output of a fully connected layer in ``forward``
        self.dropout2 = nn.Dropout(0.5)

        # First fully connected layer
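        # (9216 = 64 channels * 12 * 12: the two 3x3 convolutions shrink a
        # 28x28 MNIST image to 24x24, and the 2x2 max pooling applied in
        # ``forward`` (step 3) halves that to 12x12)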
        self.fc1 = nn.Linear(9216, 128)
        # Second fully connected layer that outputs our 10 labels
        self.fc2 = nn.Linear(128, 10)

my_nn = Net()
print(my_nn)


######################################################################
# We have finished defining our neural network; now we have to define how
# our data will pass through it.
#
# 3. Specify how data will pass through your model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When you use PyTorch to build a model, you just have to define the
# ``forward`` function, which passes the data into the computation graph
# (i.e. our neural network). This will represent our feed-forward
# algorithm.
#
# You can use any of the Tensor operations in the ``forward`` function.
#

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    # x represents our data
    def forward(self, x):
        # Pass data through conv1
        x = self.conv1(x)
        # Use the rectified-linear activation function over x
        x = F.relu(x)

        x = self.conv2(x)
        x = F.relu(x)

        # Run max pooling over x
        x = F.max_pool2d(x, 2)
        # Pass data through dropout1
        x = self.dropout1(x)
        # Flatten x with start_dim=1
        x = torch.flatten(x, 1)
        # Pass data through fc1
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)

        # Apply log_softmax to x to get log-probabilities over the 10 labels
        output = F.log_softmax(x, dim=1)
        return output


######################################################################
# 4. [Optional] Pass data through your model to test
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# To ensure we receive our desired output, let’s test our model by passing
# some random data through it.
#

# Equates to one random 28x28 image
random_data = torch.rand((1, 1, 28, 28))

my_nn = Net()
result = my_nn(random_data)
print(result)


######################################################################
# Each number in the resulting tensor is the model's prediction (as a
# log-probability) for one of the 10 labels.
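#
# As a quick follow-up (a minimal sketch reusing ``result`` from the test
# above), the predicted label is the index with the largest
# log-probability:

# argmax over the 10 label scores picks the most likely digit
prediction = result.argmax(dim=1)
print(prediction)


######################################################################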
# Congratulations! You have successfully defined a neural network in
# PyTorch.
#
# Learn More
# ----------
#
# Take a look at these other recipes to continue your learning:
#
# - TBD
# - TBD