
Commit 5d8b081

Update for Feb. 2020 MEAP release
1 parent c9ad558 commit 5d8b081

File tree

105 files changed: 8767 additions and 14282 deletions

File renamed without changes.

data/p3ch15/cls_val_example.pt

289 KB
Binary file not shown.

data/part2/luna/annotations_with_malignancy.csv

Lines changed: 1183 additions & 0 deletions

p1ch3/1_tensors.ipynb

Lines changed: 23 additions & 8 deletions
@@ -67,8 +67,8 @@
 }
 ],
 "source": [
-"import torch\n",
-"a = torch.ones(3)\n",
+"import torch # <1>\n",
+"a = torch.ones(3) # <2>\n",
 "a"
 ]
 },
@@ -274,7 +274,7 @@
 }
 ],
 "source": [
-"points = torch.FloatTensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])\n",
+"points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])\n",
 "points"
 ]
 },
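The second hunk replaces the legacy torch.FloatTensor constructor with the torch.tensor factory. A minimal standalone sketch of the difference (the variable names are illustrative, not taken from the notebook):

import torch

# torch.tensor infers the dtype from its data; float literals give float32
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
print(points.dtype)  # torch.float32

# a different dtype can be requested explicitly at construction time
points_dbl = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]], dtype=torch.float64)
print(points_dbl.dtype)  # torch.float64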
@@ -1022,10 +1022,22 @@
 "cell_type": "code",
 "execution_count": 51,
 "metadata": {},
-"outputs": [],
+"outputs": [
+{
+"data": {
+"text/plain": [
+"tensor([0., 0., 0., 0., 0.], dtype=torch.float64)"
+]
+},
+"execution_count": 51,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
 "source": [
-"points = torch.randn(10, 2) # <1>\n",
-"short_points = points.type(torch.short)"
+"points_64 = torch.rand(5, dtype=torch.double) # <1>\n",
+"points_short = points_64.to(torch.short)\n",
+"points_64 * points_short # works from PyTorch 1.3 onwards"
 ]
 },
 {
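This hunk moves from Tensor.type() to Tensor.to() for dtype conversion and multiplies tensors of different dtypes, which PyTorch 1.3 and later resolve by promoting to the larger type. A minimal sketch of the same behaviour (names mirror the notebook cell):

import torch

points_64 = torch.rand(5, dtype=torch.double)  # float64 values drawn from [0, 1)
points_short = points_64.to(torch.short)       # truncation toward zero yields all zeros here
product = points_64 * points_short             # dtypes are promoted; the result is float64
print(product)  # tensor([0., 0., 0., 0., 0.], dtype=torch.float64)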
@@ -1072,7 +1084,9 @@
 {
 "data": {
 "text/plain": [
-"tensor([5., 2.])"
+"tensor([[[4., 1.],\n",
+"         [5., 3.],\n",
+"         [2., 1.]]])"
 ]
 },
 "execution_count": 54,
@@ -1083,7 +1097,8 @@
 "source": [
 "points[1:] # <1>\n",
 "points[1:, :] # <2>\n",
-"points[1:, 0] # <3>"
+"points[1:, 0] # <3>\n",
+"points[None] # <4>"
 ]
 },
 {
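The last two hunks add indexing with None, which inserts a new dimension of size 1 in front, much like unsqueeze(0). A short standalone illustration using the same 3×2 points tensor:

import torch

points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points[1:]     # all rows after the first; shape (2, 2)
points[1:, :]  # same rows, all columns; shape (2, 2)
points[1:, 0]  # first column of those rows; shape (2,)
points[None]   # adds a leading batch dimension; shape (1, 3, 2)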

p1ch3/2_named_tensors.ipynb

Lines changed: 280 additions & 0 deletions
@@ -0,0 +1,280 @@
+{
+"cells": [
+{
+"cell_type": "code",
+"execution_count": 1,
+"metadata": {},
+"outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"../c10/core/TensorImpl.h:860: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable.\n"
+]
+}
+],
+"source": [
+"import torch\n",
+"_ = torch.tensor([0.2126, 0.7152, 0.0722], names=['c'])"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 2,
+"metadata": {},
+"outputs": [],
+"source": [
+"img_t = torch.randn(3, 5, 5) # shape [channels, rows, columns]\n",
+"weights = torch.tensor([0.2126, 0.7152, 0.0722])"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 3,
+"metadata": {},
+"outputs": [],
+"source": [
+"batch_t = torch.randn(2, 3, 5, 5) # shape [batch, channels, rows, columns]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 4,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"(torch.Size([5, 5]), torch.Size([2, 5, 5]))"
+]
+},
+"execution_count": 4,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"img_gray_naive = img_t.mean(-3)\n",
+"batch_gray_naive = batch_t.mean(-3)\n",
+"img_gray_naive.shape, batch_gray_naive.shape"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 5,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"(torch.Size([2, 3, 5, 5]), torch.Size([2, 3, 5, 5]), torch.Size([3, 1, 1]))"
+]
+},
+"execution_count": 5,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"unsqueezed_weights = weights.unsqueeze(-1).unsqueeze_(-1)\n",
+"img_weights = (img_t * unsqueezed_weights)\n",
+"batch_weights = (batch_t * unsqueezed_weights)\n",
+"img_gray_weighted = img_weights.sum(-3)\n",
+"batch_gray_weighted = batch_weights.sum(-3)\n",
+"batch_weights.shape, batch_t.shape, unsqueezed_weights.shape"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 6,
+"metadata": {
+"scrolled": true
+},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"torch.Size([2, 5, 5])"
+]
+},
+"execution_count": 6,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"img_gray_weighted_fancy = torch.einsum('...chw,c->...hw', img_t, weights)\n",
+"batch_gray_weighted_fancy = torch.einsum('...chw,c->...hw', batch_t, weights)\n",
+"batch_gray_weighted_fancy.shape"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 7,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"tensor([0.2126, 0.7152, 0.0722], names=('channels',))"
+]
+},
+"execution_count": 7,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"weights_named = torch.tensor([0.2126, 0.7152, 0.0722], names=['channels'])\n",
+"weights_named"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 8,
+"metadata": {
+"scrolled": false
+},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"img named: torch.Size([3, 5, 5]) ('channels', 'rows', 'columns')\n",
+"batch named: torch.Size([2, 3, 5, 5]) (None, 'channels', 'rows', 'columns')\n"
+]
+}
+],
+"source": [
+"img_named = img_t.refine_names(..., 'channels', 'rows', 'columns')\n",
+"batch_named = batch_t.refine_names(..., 'channels', 'rows', 'columns')\n",
+"print(\"img named:\", img_named.shape, img_named.names)\n",
+"print(\"batch named:\", batch_named.shape, batch_named.names)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 9,
+"metadata": {
+"scrolled": false
+},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"(torch.Size([3, 1, 1]), ('channels', 'rows', 'columns'))"
+]
+},
+"execution_count": 9,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"weights_aligned = weights_named.align_as(img_named)\n",
+"weights_aligned.shape, weights_aligned.names"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 10,
+"metadata": {
+"scrolled": true
+},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"(torch.Size([5, 5]), ('rows', 'columns'))"
+]
+},
+"execution_count": 10,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"gray_named = (img_named * weights_aligned).sum('channels')\n",
+"gray_named.shape, gray_named.names"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 11,
+"metadata": {
+"scrolled": true
+},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Error when attempting to broadcast dims ['channels', 'rows', 'columns'] and dims ['channels']: dim 'columns' and dim 'channels' are at the same position from the right but do not match.\n"
+]
+}
+],
+"source": [
+"try:\n",
+"    gray_named = (img_named[..., :3] * weights_named).sum('channels')\n",
+"except Exception as e:\n",
+"    print(e)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 12,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"(torch.Size([5, 5]), (None, None))"
+]
+},
+"execution_count": 12,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"gray_plain = gray_named.rename(None)\n",
+"gray_plain.shape, gray_plain.names"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
+}
+],
+"metadata": {
+"kernelspec": {
+"display_name": "Python 3",
+"language": "python",
+"name": "python3"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.7.5"
+}
+},
+"nbformat": 4,
+"nbformat_minor": 2
+}
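The new notebook converts an RGB image to grayscale three ways: manual broadcasting with unsqueezed weights, torch.einsum, and the experimental named-tensor API. A condensed, standalone sketch of the named-tensor path (PyTorch 1.3+, where named tensors were still marked experimental):

import torch

img_t = torch.randn(3, 5, 5)  # [channels, rows, columns]
weights_named = torch.tensor([0.2126, 0.7152, 0.0722], names=['channels'])

img_named = img_t.refine_names(..., 'channels', 'rows', 'columns')
weights_aligned = weights_named.align_as(img_named)         # broadcastable shape (3, 1, 1), names kept
gray_named = (img_named * weights_aligned).sum('channels')  # reduce over the named dimension
print(gray_named.shape, gray_named.names)                   # torch.Size([5, 5]) ('rows', 'columns')

gray_plain = gray_named.rename(None)  # drop names before mixing with unnamed-only operations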

p1ch4/5_image_dog.ipynb renamed to p1ch4/1_image_dog.ipynb

Lines changed: 12 additions & 7 deletions
@@ -41,7 +41,7 @@
 "outputs": [],
 "source": [
 "img = torch.from_numpy(img_arr)\n",
-"out = torch.transpose(img, 0, 2)"
+"out = img.permute(2, 0, 1)"
 ]
 },
 {
@@ -50,23 +50,28 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"batch_size = 100\n",
-"batch = torch.zeros(100, 3, 256, 256, dtype=torch.uint8)"
+"batch_size = 3\n",
+"batch = torch.zeros(batch_size, 3, 256, 256, dtype=torch.uint8)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 5,
-"metadata": {},
+"metadata": {
+"scrolled": true
+},
 "outputs": [],
 "source": [
 "import os\n",
 "\n",
 "data_dir = '../data/p1ch4/image-cats/'\n",
 "filenames = [name for name in os.listdir(data_dir) if os.path.splitext(name)[-1] == '.png']\n",
 "for i, filename in enumerate(filenames):\n",
-"    img_arr = imageio.imread(filename)\n",
-"    batch[i] = torch.transpose(torch.from_numpy(img_arr), 0, 2)"
+"    img_arr = imageio.imread(os.path.join(data_dir, filename))\n",
+"    img_t = torch.from_numpy(img_arr)\n",
+"    img_t = img_t.permute(2, 0, 1)\n",
+"    img_t = img_t[:3] # <1>\n",
+"    batch[i] = img_t"
 ]
 },
 {
@@ -109,7 +114,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.6"
+"version": "3.7.6"
 }
 },
 "nbformat": 4,
File renamed without changes.
