from torch.autograd import Variable

#========================== Table of Contents ==========================#
# 1. Basic autograd example 1
# 2. Basic autograd example 2
# 3. Loading data from numpy
# 4. Implementing the input pipeline
# 5. Input pipeline for custom dataset
# 6. Using pretrained model
# 7. Save and load model


#======================= Basic autograd example 1 =======================#
# Create tensors.
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)

# Build a computational graph.
y = w * x + b    # y = 2 * x + 3

# Compute gradients.
y.backward()

# Print out the gradients.
print(x.grad)    # x.grad = 2
print(w.grad)    # w.grad = 1
print(b.grad)    # b.grad = 1
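
# A small hedged addition (not in the original tutorial): gradients
# accumulate across multiple backward() calls, so zero them before
# reusing the same Variables in a new graph.
x.grad.data.zero_()
w.grad.data.zero_()
b.grad.data.zero_()
y2 = w * x + b
y2.backward()
print(x.grad)    # x.grad = 2 again (not 4), because the old gradient was cleared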


#======================== Basic autograd example 2 =======================#
# Create tensors.
x = Variable(torch.randn(5, 3))
y = Variable(torch.randn(5, 2))
print('x: ', x)
print('y: ', y)

# Build a linear layer.
linear = nn.Linear(3, 2)
print('w: ', linear.weight)
print('b: ', linear.bias)

# Build loss function and optimizer.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)

# Forward propagation.
pred = linear(x)
print('pred: ', pred)

# Compute loss.
loss = criterion(pred, y)
print('loss: ', loss.data[0])

# Backpropagation.
loss.backward()

# Print out the gradients.
print('dL/dw: ', linear.weight.grad)
print('dL/db: ', linear.bias.grad)

# 1-step optimization (gradient descent).
optimizer.step()
print('Optimized..!')

# You can also do optimization at the low level as shown below.
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)

# Print out the loss after optimization. Re-run the forward pass so the
# loss reflects the updated weights.
pred = linear(x)
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.data[0])
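
# An illustrative extension (an addition, not from the original tutorial):
# the same forward/backward/step cycle repeated a few times. Note that
# optimizer.zero_grad() clears the accumulated gradients before each
# backward pass.
for step in range(10):
    optimizer.zero_grad()
    pred = linear(x)
    loss = criterion(pred, y)
    loss.backward()
    optimizer.step()
    print('step [%d], loss: %.4f' % (step + 1, loss.data[0]))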


#======================== Loading data from numpy ========================#
a = np.array([[1, 2], [3, 4]])
b = torch.from_numpy(a)
print(b)
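
# Convert the torch tensor back to a numpy array (an added note, not in
# the original diff). from_numpy() and numpy() share the same underlying
# memory, so in-place changes to one are visible in the other.
c = b.numpy()
print(c)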


#===================== Implementing the input pipeline =====================#
# Download and construct the dataset.
train_dataset = dsets.CIFAR10(root='../data/',
                              train=True,
                              transform=transforms.ToTensor(),
                              download=True)

# Select one data pair (read data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)

# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=100,
                                           shuffle=True,
                                           num_workers=2)

# When iteration starts, queues and threads start to load the dataset from files.
data_iter = iter(train_loader)

# Mini-batch images and labels.
images, labels = next(data_iter)

# Actual usage of the data loader is as below.
for images, labels in train_loader:
    # Your training code will be written here.
    pass
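
# A quick illustrative check (an addition, not in the original tutorial):
# each mini-batch is a (batch, channel, height, width) image tensor and a
# label tensor of length batch_size.
print(images.size())    # e.g. (100, 3, 32, 32) for CIFAR-10
print(labels.size())    # e.g. (100,)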

#===================== Input pipeline for custom dataset =====================#
# You should build a custom dataset as below.
class CustomDataset(data.Dataset):
    def __init__(self):
        # TODO
        # 1. Initialize file paths or a list of file names.
        pass
    def __getitem__(self, index):
        # TODO
        # 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
        # 2. Return a data pair (e.g. image and label).
        pass
    def __len__(self):
        # You should change 0 to the total size of your dataset.
        return 0

# Then, you can just use torch's prebuilt data loader with the custom dataset.
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                           batch_size=100,
                                           shuffle=True,
                                           num_workers=2)
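
# A minimal filled-in sketch (illustrative, not part of the original
# tutorial): the same Dataset interface over an in-memory toy collection.
class ToyDataset(data.Dataset):
    def __init__(self):
        # In a real dataset these would be read from files.
        self.images = torch.randn(10, 3, 32, 32)
        self.labels = torch.LongTensor(10).zero_()
    def __getitem__(self, index):
        return self.images[index], self.labels[index]
    def __len__(self):
        return 10

toy_loader = torch.utils.data.DataLoader(dataset=ToyDataset(), batch_size=5)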


#========================== Using pretrained model ==========================#
# Download and load the pretrained resnet.
resnet = torchvision.models.resnet18(pretrained=True)

# If you want to finetune only the top layer of the model,
# freeze all the pretrained parameters first.
for param in resnet.parameters():
    param.requires_grad = False

# Replace the top layer for finetuning.
resnet.fc = nn.Linear(resnet.fc.in_features, 100)    # 100 is just an example.

# For test.
images = Variable(torch.randn(10, 3, 256, 256))
outputs = resnet(images)
print(outputs.size())    # (10, 100)
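
# A hedged sketch (an addition, not from the original tutorial): since the
# pretrained parameters are frozen above, pass only the new top layer's
# parameters to the optimizer when finetuning.
optimizer = torch.optim.SGD(resnet.fc.parameters(), lr=0.001)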


#============================ Save and load model ============================#
# Save and load the entire model.
torch.save(resnet, 'model.pkl')
model = torch.load('model.pkl')
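
# Save and load only the model parameters; this is generally the
# recommended approach (an added note, not part of the original diff).
torch.save(resnet.state_dict(), 'params.pkl')
resnet.load_state_dict(torch.load('params.pkl'))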