
Commit b4592e9

Merge pull request #3 from pytorch/grad
Update .grad attribute type

2 parents: 7bfe48e + 0b8c6ba

5 files changed: +115 -117 lines

.gitignore (+2 -1)
@@ -1,2 +1,3 @@
 .ipynb_checkpoints
-*~
+data
+*~

Creating extensions using numpy and scipy.ipynb (+46 -55)
@@ -86,25 +86,25 @@
 "output_type": "stream",
 "text": [
 "\n",
-" 4.8235  2.8210  4.0698 11.3034  4.0292\n",
-" 2.8678  6.5351  0.9228 15.3372  3.8725\n",
-" 4.0342  7.6988  3.8099  4.0796 12.9163\n",
-" 9.2561  6.0761  6.0133  5.7306  5.8949\n",
-" 6.1931  3.0239  8.3571  9.1645  2.3575\n",
-" 9.2561  7.6919  5.4074  9.5971  5.8949\n",
-" 4.0342 13.2063  9.0728  5.2962 12.9163\n",
-" 2.8678  2.4252  2.0834  4.9570  3.8725\n",
+" 6.9997 11.0343  9.7395  6.0724  6.0526\n",
+" 7.0250 11.4841  7.1110  5.6337  8.6441\n",
+" 7.8062 10.9281  9.8279 23.4972  7.4842\n",
+" 6.4962  4.5987  0.7936  3.9360  4.9595\n",
+" 9.7913 10.3732  1.6261  2.0874 14.5295\n",
+" 6.4962  5.7111  1.9793  8.8037  4.9595\n",
+" 7.8062  8.7752  6.4442 14.1250  7.4842\n",
+" 7.0250  5.4642  1.7983  4.4346  8.6441\n",
 "[torch.FloatTensor of size 8x5]\n",
 "\n",
-"\n",
-" 0.1849 -0.0055  0.0743 -0.0751  0.1089 -0.0751  0.0743 -0.0055\n",
-"-0.0662  0.1506  0.1307 -0.0629 -0.1199  0.0800 -0.0873  0.1036\n",
-"-0.0024 -0.0936  0.0083  0.0327 -0.1370 -0.2486 -0.0117 -0.0216\n",
-"-0.0074 -0.1277  0.0631  0.0348  0.0422  0.1335  0.0221 -0.0900\n",
-" 0.1353  0.0098  0.0030  0.0408 -0.0442  0.0408  0.0030  0.0098\n",
-"-0.0074 -0.0900  0.0221  0.1335  0.0422  0.0348  0.0631 -0.1277\n",
-"-0.0024 -0.0216 -0.0117 -0.2486 -0.1370  0.0327  0.0083 -0.0936\n",
-"-0.0662  0.1036 -0.0873  0.0800 -0.1199 -0.0629  0.1307  0.1506\n",
+"Variable containing:\n",
+"-0.0129  0.0330  0.0036 -0.0737  0.2354 -0.0737  0.0036  0.0330\n",
+" 0.0542  0.0986 -0.0382 -0.1137 -0.0944 -0.0973 -0.0172 -0.0021\n",
+"-0.1538 -0.1444  0.0356  0.1590  0.0588 -0.0188 -0.0611  0.0346\n",
+" 0.1511  0.0370 -0.2513 -0.1518  0.1513 -0.2312 -0.0896 -0.1450\n",
+"-0.1668 -0.0814  0.1954  0.1405  0.2191  0.1405  0.1954 -0.0814\n",
+" 0.1511 -0.1450 -0.0896 -0.2312  0.1513 -0.1518 -0.2513  0.0370\n",
+"-0.1538  0.0346 -0.0611 -0.0188  0.0588  0.1590  0.0356 -0.1444\n",
+" 0.0542 -0.0021 -0.0172 -0.0973 -0.0944 -0.1137 -0.0382  0.0986\n",
 "[torch.FloatTensor of size 8x8]\n",
 "\n"
 ]
@@ -139,7 +139,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 4,
 "metadata": {
 "collapsed": false
 },
@@ -182,7 +182,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 5,
 "metadata": {
 "collapsed": false
 },
@@ -192,33 +192,33 @@
 "output_type": "stream",
 "text": [
 "[Parameter containing:\n",
-"-0.1271  0.8109  0.4178\n",
-"-0.5126 -1.1835 -0.2776\n",
-" 0.4214  0.0886  1.0216\n",
+" 0.0460  0.5052  0.9281\n",
+" 0.8355  1.2642 -0.1283\n",
+" 1.7027 -0.3146 -0.6927\n",
 "[torch.FloatTensor of size 3x3]\n",
 "]\n",
 "Variable containing:\n",
-" 4.9120  0.6210  1.7908 -0.6933  3.4223  0.4025  2.0330  1.9110\n",
-"-1.6563 -0.9113 -2.3579  0.5714 -2.4509 -1.1477 -2.2362  0.2235\n",
-"-2.5879 -0.2629  0.0876  1.1707  1.2481  1.6186  1.2425  3.4960\n",
-" 2.6881  2.0360  1.5574 -0.8602 -2.8442 -2.2571 -1.5803 -2.0943\n",
-"-1.0176 -2.6565 -0.5091  1.5564  1.4575  3.1081  0.9570  1.0759\n",
-"-2.7108  0.7214 -3.5160 -2.0563  0.6138 -2.6700 -1.1769 -1.0721\n",
-" 2.8449  3.2918  1.6901  1.5427  2.4700  0.3433  2.2472  1.3501\n",
-"-3.9733 -1.4927 -0.6596 -1.2467 -2.1322  1.1351 -1.4640 -0.5982\n",
+" 1.4619 -4.0543  0.4391 -0.5423 -4.3719  3.9728 -0.4084 -2.8224\n",
+"-3.6799 -3.9278  4.9932 -3.8952  3.0663  1.6303  2.9775  1.1806\n",
+"-3.1694  2.1434  0.4432  1.6941  1.9344 -0.1196  1.1259  4.3571\n",
+"-0.7934 -1.4610  2.2360  0.6406  0.3729  1.9140  0.2427  0.4298\n",
+"-2.2961 -0.4189  5.6658  0.8090 -1.3030  2.2934  0.7164 -0.0272\n",
+" 1.0649  1.0400 -1.3774 -0.2026 -0.9841  1.7192  3.0843  3.4241\n",
+" 3.2743 -1.8780 -2.3084  0.8508  1.1622  0.6060  2.5559  1.0228\n",
+"-2.3282 -1.1790 -2.4604 -1.9252 -1.3962  1.1054  3.6035  3.1302\n",
 "[torch.FloatTensor of size 8x8]\n",
 "\n",
-"\n",
-"-0.0466 -0.1651  0.2731  0.0436 -0.1487 -0.3089  0.7800  0.4718  0.1200 -0.5005\n",
-" 0.1600 -0.8988  1.6481  0.3330  0.3586 -2.7015  0.7774 -0.2702  1.4118  0.0614\n",
-" 1.1303 -2.6691  0.4635  1.2966  2.5482 -3.1470  2.8663 -1.8794 -1.9309 -0.8698\n",
-"-0.0614  1.5925 -0.7043 -0.9832 -0.7737 -4.6351  5.2933  0.2257 -0.9895  0.9198\n",
-"-0.9014  2.8442 -2.7092  2.2500  1.1892 -5.0975  2.4289  0.2922 -2.1747  0.8316\n",
-"-2.7050  3.6107 -1.7208 -0.4780 -0.3891 -2.2356  1.2152 -1.4541 -0.5707  1.2749\n",
-"-2.1614  2.0130 -4.0183  0.6822  0.9159  0.5670  3.7633 -0.9087  0.0326 -0.0958\n",
-" 0.3509  0.1484 -1.2759 -0.8248  0.8566 -2.6416  2.8875 -1.2788  1.1253  0.5939\n",
-"-0.0029  0.4912  1.8060 -1.4529  2.6439 -0.9157  0.5279 -3.4779  0.2804 -0.2260\n",
-"-0.1932  0.1283 -0.1745 -0.4872  1.0467 -0.1953  0.3003  1.3696  0.8338  0.4173\n",
+"Variable containing:\n",
+" 0.0427  0.7780  1.7383  1.8333  3.8198  0.1135 -3.5576 -4.3994 -0.4354 -0.6021\n",
+" 0.4661  1.2470  2.1080  6.3960  0.6894 -4.5144 -3.2005 -0.2762  0.3508  1.7803\n",
+" 0.8492  0.9083  4.1836  0.6133 -3.4092 -1.8541  0.2254  3.6970  1.0382  0.5031\n",
+" 0.0919  1.7864  1.5422  0.2942  2.0176  1.0741  0.8390  2.6984  2.4786  0.2636\n",
+" 0.2600  0.5248  2.3759  2.1921 -3.4520 -3.2025  2.6008 -0.7395  0.3200  0.0964\n",
+" 0.1632  1.9750  2.5973 -2.0378 -5.2213  1.2097  1.3411  1.6995 -1.4448 -2.6965\n",
+" 0.5332  0.8034 -3.0446 -6.2269 -3.4281 -0.5354 -0.4278 -0.7310 -1.1542  0.7947\n",
+" 0.1243 -1.0476 -2.9011 -5.9247 -2.5209 -3.1030 -4.4343 -2.7956  1.4640  0.0090\n",
+"-0.9033 -0.4323 -2.5873 -1.8884 -1.4657 -1.4747 -0.0032  1.4012 -0.7892 -0.1049\n",
+" 0.0739 -0.7349 -0.3925 -0.9291 -1.1198  0.5321  1.9748  0.1242 -0.4062  0.3108\n",
 "[torch.FloatTensor of size 10x10]\n",
 "\n"
 ]
@@ -233,34 +233,25 @@
 "output.backward(torch.randn(8, 8))\n",
 "print(input.grad)"
 ]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"collapsed": true
-},
-"outputs": [],
-"source": []
 }
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 2",
+"display_name": "Python 3",
 "language": "python",
-"name": "python2"
+"name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
-"version": 2
+"version": 3
 },
 "file_extension": ".py",
 "mimetype": "text/x-python",
 "name": "python",
 "nbconvert_exporter": "python",
-"pygments_lexer": "ipython2",
-"version": "2.7.12"
+"pygments_lexer": "ipython3",
+"version": "3.5.2"
 }
 },
 "nbformat": 4,

Deep Learning with PyTorch.ipynb (+51 -38)
@@ -19,7 +19,9 @@
 "It's a Python based scientific computing package targeted at two sets of audiences:\n",
 "\n",
 "- A replacement for numpy to use the power of GPUs\n",
-"- a deep learning research platform that provides maximum flexibility and speed"
+"- a deep learning research platform that provides maximum flexibility and speed\n",
+"\n",
+"**If you want to complete the full tutorial, including training a neural network for image classification, you have to install the `torchvision` package.**"
 ]
 },
 {
@@ -88,6 +90,13 @@
 "x.size()"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"*NOTE: `torch.Size` is in fact a tuple, so it supports the same operations*"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
@@ -293,23 +302,23 @@
 "## Autograd: automatic differentiation\n",
 "\n",
 "The `autograd` package provides automatic differentiation for all operations on Tensors. \n",
-"It is a define-by-run framework, which means that your backprop is defined by how your code is run. \n",
+"It is a define-by-run framework, which means that your backprop is defined by how your code is run, and that every single iteration can be different. \n",
 "\n",
 "Let us see this in more simple terms with some examples.\n",
 "\n",
 "`autograd.Variable` is the central class of the package. \n",
-"It wraps a Tensor, and afterwards you can run tensor operations on it, and finally call `.backward()`\n",
+"It wraps a Tensor, and supports nearly all of operations defined on it. Once you finish your computation you can call `.backward()` and have all the gradients computed automatically.\n",
 "\n",
-"You can access the raw tensor through the `.data` attribute, and after computing the backward pass, a gradient w.r.t. this variable is accumulated into `.grad` attribute.\n",
+"You can access the raw tensor through the `.data` attribute, while the gradient w.r.t. this variable is accumulated into `.grad`.\n",
 "\n",
 "![Variable](images/Variable.png)\n",
 "\n",
 "There's one more class which is very important for autograd implementation - a `Function`. \n",
 "\n",
-"`Variable` and `Function` are interconnected and build up an acyclic graph, that encodes a complete history of computation. Each variable has a `.creator` attribute that references a `Function` that has created the `Variable` (except for Variables created by the user - these have `creator=None`).\n",
+"`Variable` and `Function` are interconnected and build up an acyclic graph, that encodes a complete history of computation. Each variable has a `.creator` attribute that references a `Function` that has created the `Variable` (except for Variables created by the user - their `creator is None`).\n",
 "\n",
 "If you want to compute the derivatives, you can call `.backward()` on a `Variable`. \n",
-"If `Variable` is a scalar (i.e. it holds a one element tensor), you don't need to specify any arguments to `backward()`, however if it has more elements, you need to specify a `grad_output` argument that is a tensor of matching shape.\n"
+"If `Variable` is a scalar (i.e. it holds a one element data), you don't need to specify any arguments to `backward()`, however if it has more elements, you need to specify a `grad_output` argument that is a tensor of matching shape.\n"
 ]
 },
 {
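The behavior this hunk documents, and the `.grad` type change the PR title refers to, are easiest to see in a tiny example; a minimal sketch assuming the Variable API of this era:

```python
import torch
from torch.autograd import Variable

# A user-created Variable: its creator is None.
x = Variable(torch.ones(2, 2), requires_grad=True)
y = x + 2          # y.creator references the Function that produced y
z = (y * y).sum()  # z holds a single element, so backward() needs no arguments

z.backward()
print(x.grad)      # per this commit, .grad is itself a Variable, not a raw Tensor
print(x.data)      # the raw tensor wrapped by x
```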
@@ -523,26 +532,31 @@
 "outputs": [],
 "source": [
 "import torch.nn as nn\n",
+"import torch.nn.functional as F\n",
+"# Some more python helpers\n",
+"import functools\n",
+"import operator\n",
 "\n",
 "class Net(nn.Container):\n",
 "    def __init__(self):\n",
 "        super(Net, self).__init__()\n",
 "        self.conv1 = nn.Conv2d(1, 6, 5) # 1 input image channel, 6 output channels, 5x5 square convolution kernel\n",
-"        self.pool = nn.MaxPool2d(2,2) # A max-pooling operation that looks at 2x2 windows and finds the max.\n",
 "        self.conv2 = nn.Conv2d(6, 16, 5)\n",
 "        self.fc1 = nn.Linear(16*5*5, 120) # an affine operation: y = Wx + b\n",
 "        self.fc2 = nn.Linear(120, 84)\n",
 "        self.fc3 = nn.Linear(84, 10)\n",
-"        self.relu = nn.ReLU()\n",
 "\n",
 "    def forward(self, x):\n",
-"        x = self.pool(self.relu(self.conv1(x)))\n",
-"        x = self.pool(self.relu(self.conv2(x)))\n",
-"        x = x.view(-1, 16*5*5)\n",
-"        x = self.relu(self.fc1(x))\n",
-"        x = self.relu(self.fc2(x))\n",
+"        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # Max pooling over a (2, 2) window\n",
+"        x = F.max_pool2d(F.relu(self.conv2(x)), 2) # If the size is a square you can only specify a single number\n",
+"        x = x.view(-1, self.num_flat_features(x))\n",
+"        x = F.relu(self.fc1(x))\n",
+"        x = F.relu(self.fc2(x))\n",
 "        x = self.fc3(x)\n",
 "        return x\n",
+"    \n",
+"    def num_flat_features(self, x):\n",
+"        return functools.reduce(operator.mul, x.size()[1:])\n",
 "\n",
 "net = Net()\n",
 "net"
@@ -610,15 +624,25 @@
 "source": [
 "> #### NOTE: `torch.nn` only supports mini-batches\n",
 "The entire `torch.nn` package only supports inputs that are a mini-batch of samples, and not a single sample. \n",
-"For example, `nn.Conv2d` will take in a 4D Tensor of `nSamples x nChannels x Height x Width` \n",
-"*This is done to simplify developer code and eliminate bugs*"
+"For example, `nn.Conv2d` will take in a 4D Tensor of `nSamples x nChannels x Height x Width`.\n",
+"\n",
+"> *If you have a single sample, just use `input.unsqueeze(0)` to add a fake batch dimension.*"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"##### Review of what you learnt so far:\n",
+"### Recap of all the classes you've seen so far:\n",
+"\n",
+"* `torch.Tensor` - A **multi-dimensional array**.\n",
+"* `autograd.Variable` - **Wraps a Tensor and records the history of operations** applied to it. Has the same API as a `Tensor`, with some additions like `backward()`. Also **holds the gradient** w.r.t. the tensor.\n",
+"* `nn.Module` - Neural network module. **Convenient way of encapsulating parameters**, with helpers for moving them to GPU, exporting, loading, etc.\n",
+"* `nn.Container` - `Module` that is a **container for other Modules**.\n",
+"* `nn.Parameter` - A kind of Variable, that is **automatically registered as a parameter when assigned as an attribute to a `Module`**.\n",
+"* `autograd.Function` - Implements **forward and backward definitions of an autograd operation**. Every `Variable` operation, creates at least a single `Function` node, that connects to functions that created a `Variable` and **encodes its history**.\n",
+"\n",
+"##### At this point, we covered:\n",
 "- Defining a neural network\n",
 "- Processing inputs and calling backward.\n",
 "\n",
@@ -670,7 +694,7 @@
 "      -> loss\n",
 "```\n",
 "\n",
-"So, when we call `loss.backward()`, the whole graph is differentiated w.r.t. the loss, and all Variables in the graph will have their `.grad` Tensor accumulated with the gradient.\n",
+"So, when we call `loss.backward()`, the whole graph is differentiated w.r.t. the loss, and all Variables in the graph will have their `.grad` Variable accumulated with the gradient.\n",
 " "
 ]
 },
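To observe the `.grad` type change this hunk edits, one can inspect a parameter's gradient around the backward call; a sketch assuming `net` and `loss` from the surrounding cells:

```python
net.zero_grad()             # zero the gradient buffers of all parameters
print(net.conv1.bias.grad)  # empty (or zeros) before the backward pass
loss.backward()
print(net.conv1.bias.grad)  # now populated; after this commit it is a Variable
```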
@@ -727,7 +751,7 @@
 "```python\n",
 "learning_rate = 0.01\n",
 "for f in net.parameters():\n",
-"    f.data.sub_(f.grad * learning_rate)\n",
+"    f.data.sub_(f.grad.data * learning_rate)\n",
 "```\n",
 "\n",
 "However, as you use neural networks, you want to use various different update rules such as SGD, Nesterov-SGD, Adam, RMSProp, etc.\n",
@@ -822,13 +846,11 @@
 "transform=transforms.Compose([transforms.ToTensor(),\n",
 "                              transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n",
 "                             ])\n",
-"trainset = torchvision.datasets.CIFAR10(root='/Users/soumith/code/pytorch-vision/test/cifar', \n",
-"                                        train=True, download=True, transform=transform)\n",
+"trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n",
 "trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, \n",
 "                                          shuffle=True, num_workers=2)\n",
 "\n",
-"testset = torchvision.datasets.CIFAR10(root='/Users/soumith/code/pytorch-vision/test/cifar', \n",
-"                                       train=False, download=True, transform=transform)\n",
+"testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n",
 "testloader = torch.utils.data.DataLoader(testset, batch_size=4, \n",
 "                                         shuffle=False, num_workers=2)\n",
 "classes = ('plane', 'car', 'bird', 'cat',\n",
@@ -1163,8 +1185,8 @@
 "metadata": {},
 "source": [
 "#### Training on the GPU\n",
-"The idea is pretty simple. \n",
-"Just like how you transfer a Tensor on to the GPU, you transfer the neural net onto the GPU."
+"Just like how you transfer a Tensor on to the GPU, you transfer the neural net onto the GPU.\n",
+"This will recursively go over all modules and convert their parameters and buffers to CUDA tensors."
 ]
 },
 {
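A sketch of the transfer this hunk describes (the availability guard is an addition, not in the notebook):

```python
import torch

if torch.cuda.is_available():
    net.cuda()  # recursively moves all parameters and buffers to CUDA tensors
    # inputs and targets must be moved at every step as well:
    inputs, labels = inputs.cuda(), labels.cuda()
```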
@@ -1207,34 +1229,25 @@
 "- [More tutorials](https://github.com/pytorch/tutorials)\n",
 "- [Chat with other users on Slack](pytorch.slack.com/messages/beginner/)"
 ]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"collapsed": true
-},
-"outputs": [],
-"source": []
 }
 ],
 "metadata": {
 "kernelspec": {
-"display_name": "Python 2",
+"display_name": "Python 3",
 "language": "python",
-"name": "python2"
+"name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
-"version": 2
+"version": 3
 },
 "file_extension": ".py",
 "mimetype": "text/x-python",
 "name": "python",
 "nbconvert_exporter": "python",
-"pygments_lexer": "ipython2",
-"version": "2.7.12"
+"pygments_lexer": "ipython3",
+"version": "3.5.2"
 }
 },
 "nbformat": 4,
