To run this tutorial, you’ll need to install PyTorch, TorchVision,
Matplotlib, and TensorBoard.

- With ``conda``:
-
- .. code-block:: sh
-
-     conda install pytorch torchvision -c pytorch
-     conda install matplotlib tensorboard
-
With ``pip``:

.. code-block:: sh

    pip install torch torchvision matplotlib tensorboard

Introduction
------------

In this notebook, we’ll be training a variant of LeNet-5 against the
Fashion-MNIST dataset. Fashion-MNIST is a set of image tiles depicting
various garments, with ten class labels indicating the type of garment
depicted.

"""

+ # Image display
+ import matplotlib.pyplot as plt
+ import numpy as np
+
# PyTorch model and training necessities
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

- # Image display
- import matplotlib.pyplot as plt
- import numpy as np
-
# PyTorch TensorBoard support
from torch.utils.tensorboard import SummaryWriter

######################################################################
# Showing Images in TensorBoard
# -----------------------------
#
# Let’s start by adding sample images from our dataset to TensorBoard:
#

# Gather datasets and prepare them for consumption
transform = transforms.Compose(
-     [transforms.ToTensor(),
-     transforms.Normalize((0.5,), (0.5,))])
+     [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
+ )
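
# (``ToTensor()`` scales pixel values into [0, 1]; ``Normalize((0.5,), (0.5,))``
# then applies (x - 0.5) / 0.5, mapping them into [-1, 1]. The ``img / 2 + 0.5``
# in ``matplotlib_imshow`` below simply inverts this.)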

# Store separate training and validation splits in ./data
- training_set = torchvision.datasets.FashionMNIST('./data',
-     download=True,
-     train=True,
-     transform=transform)
- validation_set = torchvision.datasets.FashionMNIST('./data',
-     download=True,
-     train=False,
-     transform=transform)
-
- training_loader = torch.utils.data.DataLoader(training_set,
-     batch_size=4,
-     shuffle=True,
-     num_workers=2)
-
-
- validation_loader = torch.utils.data.DataLoader(validation_set,
-     batch_size=4,
-     shuffle=False,
-     num_workers=2)
+ training_set = torchvision.datasets.FashionMNIST(
+     "./data", download=True, train=True, transform=transform
+ )
+ validation_set = torchvision.datasets.FashionMNIST(
+     "./data", download=True, train=False, transform=transform
+ )
+
+ training_loader = torch.utils.data.DataLoader(
+     training_set, batch_size=4, shuffle=True, num_workers=2
+ )
+
+
+ validation_loader = torch.utils.data.DataLoader(
+     validation_set, batch_size=4, shuffle=False, num_workers=2
+ )

# Class labels
- classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
-            'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot')
+ classes = (
+     "T-shirt/top",
+     "Trouser",
+     "Pullover",
+     "Dress",
+     "Coat",
+     "Sandal",
+     "Shirt",
+     "Sneaker",
+     "Bag",
+     "Ankle Boot",
+ )
+

# Helper function for inline image display
def matplotlib_imshow(img, one_channel=False):
    if one_channel:
        img = img.mean(dim=0)
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    if one_channel:
        plt.imshow(npimg, cmap="Greys")
    else:
        plt.imshow(np.transpose(npimg, (1, 2, 0)))

+
# Extract a batch of 4 images
dataiter = iter(training_loader)
images, labels = next(dataiter)
@@ -138,14 +139,14 @@ def matplotlib_imshow(img, one_channel=False):
# minibatch of our input data. Below, we use the ``add_image()`` call on
# ``SummaryWriter`` to log the image for consumption by TensorBoard, and
# we also call ``flush()`` to make sure it’s written to disk right away.
#

# Default log_dir argument is "runs" - but it's good to be specific
# torch.utils.tensorboard.SummaryWriter is imported above
- writer = SummaryWriter('runs/fashion_mnist_experiment_1')
+ writer = SummaryWriter("runs/fashion_mnist_experiment_1")

# Write image data to TensorBoard log dir
- writer.add_image('Four Fashion-MNIST Images', img_grid)
+ writer.add_image("Four Fashion-MNIST Images", img_grid)
writer.flush()

# To view, start TensorBoard on the command line with:
@@ -157,17 +158,18 @@ def matplotlib_imshow(img, one_channel=False):
# If you start TensorBoard at the command line and open it in a new
# browser tab (usually at `localhost:6006 <localhost:6006>`__), you should
# see the image grid under the IMAGES tab.
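#
# As a reminder (assuming the default ``runs`` log directory used above),
# the standard invocation is:
#
# .. code-block:: sh
#
#    tensorboard --logdir=runs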
#
# Graphing Scalars to Visualize Training
# --------------------------------------
#
# TensorBoard is useful for tracking the progress and efficacy of your
# training. Below, we’ll run a training loop, track some metrics, and save
# the data for TensorBoard’s consumption.
#
# Let’s define a model to categorize our image tiles, and an optimizer and
# loss function for training:
#
+

class Net(nn.Module):
    def __init__(self):
@@ -187,7 +189,7 @@ def forward(self, x):
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


net = Net()
criterion = nn.CrossEntropyLoss()
@@ -197,7 +199,7 @@ def forward(self, x):
##########################################################################
# Now let’s train a single epoch, and evaluate the training vs. validation
# set losses every 1000 batches:
#

print(len(validation_loader))
for epoch in range(1):  # loop over the dataset multiple times
@@ -213,44 +215,50 @@ def forward(self, x):
        optimizer.step()

        running_loss += loss.item()
-         if i % 1000 == 999:    # Every 1000 mini-batches...
-             print('Batch {}'.format(i + 1))
+         if i % 1000 == 999:  # Every 1000 mini-batches...
+             print("Batch {}".format(i + 1))
            # Check against the validation set
            running_vloss = 0.0

            # In evaluation mode some model-specific operations can be omitted, e.g. dropout layers
-             net.train(False)  # Switching to evaluation mode, e.g. turning off regularisation
+             net.train(
+                 False
+             )  # Switching to evaluation mode, e.g. turning off regularisation
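            # (``net.train(False)`` is equivalent to ``net.eval()``; both put
            # the model in inference mode, disabling training-only behaviour
            # such as dropout.)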
            for j, vdata in enumerate(validation_loader, 0):
                vinputs, vlabels = vdata
                voutputs = net(vinputs)
                vloss = criterion(voutputs, vlabels)
                running_vloss += vloss.item()
-             net.train(True)  # Switching back to training mode, e.g. turning on regularisation
-
+             net.train(
+                 True
+             )  # Switching back to training mode, e.g. turning on regularisation
+
            avg_loss = running_loss / 1000
            avg_vloss = running_vloss / len(validation_loader)

            # Log the running loss averaged per batch
-             writer.add_scalars('Training vs. Validation Loss',
-                                {'Training': avg_loss, 'Validation': avg_vloss},
-                                epoch * len(training_loader) + i)
+             writer.add_scalars(
+                 "Training vs. Validation Loss",
+                 {"Training": avg_loss, "Validation": avg_vloss},
+                 epoch * len(training_loader) + i,
+             )
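            # (The final argument to ``add_scalars()`` is the global step,
            # here the overall batch index, which becomes the x-axis of the
            # plot in TensorBoard.)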

            running_loss = 0.0
- print('Finished Training')
+ print("Finished Training")

writer.flush()


#########################################################################
# Switch to your open TensorBoard and have a look at the SCALARS tab.
#
# Visualizing Your Model
# ----------------------
#
# TensorBoard can also be used to examine the data flow within your model.
# To do this, call the ``add_graph()`` method with a model and sample
# input:
#

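# As a minimal sketch (using the ``net`` and ``writer`` defined above),
# the call we’re about to make boils down to:
#
#     writer.add_graph(net, images)
#     writer.flush()
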
# Again, grab a single mini-batch of images
dataiter = iter(training_loader)
@@ -266,20 +274,21 @@ def forward(self, x):
# When you switch over to TensorBoard, you should see a GRAPHS tab.
# Double-click the “NET” node to see the layers and data flow within your
# model.
#
# Visualizing Your Dataset with Embeddings
# ----------------------------------------
#
# The 28-by-28 image tiles we’re using can be modeled as 784-dimensional
# vectors (28 \* 28 = 784). It can be instructive to project this to a
# lower-dimensional representation. The ``add_embedding()`` method does
# this automatically: it projects a set of data onto the three dimensions
# with highest variance and displays them as an interactive 3D chart.
#
# Below, we’ll take a sample of our data, and generate such an embedding:
#
+

# Select a random subset of data and corresponding labels
def select_n_random(data, labels, n=100):
@@ -288,6 +297,7 @@ def select_n_random(data, labels, n=100):
    perm = torch.randperm(len(data))
    return data[perm][:n], labels[perm][:n]

+
# Extract a random subset of data
images, labels = select_n_random(training_set.data, training_set.targets)

@@ -296,9 +306,7 @@ def select_n_random(data, labels, n=100):

# log embeddings
features = images.view(-1, 28 * 28)
- writer.add_embedding(features,
-                      metadata=class_labels,
-                      label_img=images.unsqueeze(1))
+ writer.add_embedding(features, metadata=class_labels, label_img=images.unsqueeze(1))
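# (``images.unsqueeze(1)`` adds a channel axis, giving ``label_img`` the
# (N, 1, 28, 28) shape that ``add_embedding()`` expects for image thumbnails.)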
writer.flush()
writer.close()

@@ -309,19 +317,19 @@ def select_n_random(data, labels, n=100):
# zoom the model. Examine it at large and small scales, and see whether
# you can spot patterns in the projected data and the clustering of
# labels.
#
# For better visibility, it’s recommended to:
#
# - Select “label” from the “Color by” drop-down on the left.
# - Toggle the Night Mode icon along the top to place the
#   light-colored images on a dark background.
#
# Other Resources
# ---------------
#
# For more information, have a look at:
#
# - PyTorch documentation on `torch.utils.tensorboard.SummaryWriter <https://pytorch.org/docs/stable/tensorboard.html?highlight=summarywriter>`__
# - TensorBoard tutorial content in the `PyTorch.org Tutorials <https://pytorch.org/tutorials/>`__
# - For more information about TensorBoard, see the `TensorBoard
#   documentation <https://www.tensorflow.org/tensorboard>`__