finish chapter 6
06_03_gradient_torch_loss_optim.py (new file, 52 lines added)
@@ -0,0 +1,52 @@
# prediction:           manual (forward pass still written by hand)
# gradient computation: autograd
# loss computation:     PyTorch loss -> the handwritten loss function is replaced by nn.MSELoss
# parameter update:     PyTorch optimizer -> the handwritten weight update is replaced by optimizer.step()
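# For reference, a sketch of the two manual pieces being replaced (assumption:
# this mirrors the earlier manual version of this script, not code in this file):
#
#   loss was computed as   ((y_pred - y) ** 2).mean()
#   the update step was    with torch.no_grad(): w -= learning_rate * w.grad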
import torch
import torch.nn as nn  # neural network module (provides built-in loss functions)


# linear regression, no bias:
# f = w * x, and the data follow f = 2 * x, so training should drive w toward 2

X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)  # requires_grad so autograd tracks w and fills w.grad

# model prediction
def forward(x):
    return w * x

print(f'Prediction before training: f(5) = {forward(5):.3f}')  # w starts at 0, so this prints 0.000

# Training
learning_rate = 0.01
n_iters = 100

loss = nn.MSELoss()  # PyTorch's built-in mean squared error loss (default reduction='mean')
optimizer = torch.optim.SGD([w], lr=learning_rate)  # built-in SGD optimizer managing the single parameter w
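# Quick sanity check (a sketch, not part of the original script): the built-in
# loss with its default 'mean' reduction should match the handwritten
# mean squared error on the tensors defined above.
assert torch.allclose(loss(Y, forward(X)), ((forward(X) - Y) ** 2).mean())
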
for epoch in range(n_iters):
    # prediction = forward pass
    y_pred = forward(X)

    # loss
    l = loss(Y, y_pred)

    # gradients = backward pass
    l.backward()
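    # What backward() does here (for reference): autograd evaluates dl/dw and
    # accumulates it into w.grad; for l = mean((w*x - y)**2) that value is
    # mean(2*x*(w*x - y)), which the manual version computed by hand.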
    # update weights
    optimizer.step()

    # clear gradients (backward() accumulates into w.grad, so reset it each iteration)
    optimizer.zero_grad()

    if epoch % 10 == 0:  # report every 10th epoch
        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {forward(5):.3f}')
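
# Expected behaviour rather than exact output: since the data follow f = 2*x,
# w should end up very close to 2.0 and the final prediction f(5) close to 10.0.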