finish chapter 6
.gitattributes (vendored; 1 deletion)

```
@@ -1 +0,0 @@
-*.mp4 filter=lfs diff=lfs merge=lfs -text
```
Changed file (NumPy version; filename not shown in this view):

```
@@ -1,9 +1,14 @@
-import numpy as np
-
+# prediction manual
+# gradient computation manual
+# loss computation manual
+# parameter update manual
+
 # linear regression, no bias
 # f = w*x
 # f = 2*x
 
+import numpy as np
+
 X = np.array([1, 2, 3, 4], dtype=np.float32)
 Y = np.array([2, 4, 6, 8], dtype=np.float32)
 
```
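Only the new header comments and the data lines appear in this hunk. As context, a minimal sketch of what the full manual-gradient script plausibly contains, reconstructed from those comments (the forward/loss/gradient helpers are assumptions, not shown in the diff):

```
import numpy as np

X = np.array([1, 2, 3, 4], dtype=np.float32)
Y = np.array([2, 4, 6, 8], dtype=np.float32)

w = 0.0  # parameter to learn; the target is w = 2

def forward(x):
    # prediction: manual
    return w * x

def loss(y, y_pred):
    # loss: manual MSE = mean((w*x - y)^2)
    return ((y_pred - y) ** 2).mean()

def gradient(x, y, y_pred):
    # d(MSE)/dw = mean(2 * x * (w*x - y)), derived by hand
    return np.mean(2 * x * (y_pred - y))

learning_rate = 0.01
for epoch in range(20):
    y_pred = forward(X)
    l = loss(Y, y_pred)
    dw = gradient(X, Y, y_pred)
    w -= learning_rate * dw  # parameter update: manual
    if epoch % 2 == 0:
        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')
```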
Changed file (PyTorch autograd version; filename not shown in this view):

```
@@ -1,9 +1,14 @@
-import torch
-
+# prediction manual
+# gradient computation autograd -> the gradient computation gets replaced by backward()
+# loss computation manual
+# parameter update manual
+
 # linear regression, no bias
 # f = w*x
 # f = 2*x
 
+import torch
+
 X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
 Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
 
```
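Here too the hunk shows only the header and data lines. A minimal sketch of the autograd variant, assuming it mirrors the NumPy script with the hand-derived gradient replaced by backward():

```
import torch

X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)

def forward(x):
    # prediction: manual
    return w * x

def loss(y, y_pred):
    # loss: still manual MSE
    return ((y_pred - y) ** 2).mean()

learning_rate = 0.01
for epoch in range(100):
    y_pred = forward(X)
    l = loss(Y, y_pred)
    l.backward()  # autograd replaces the manual gradient formula
    with torch.no_grad():
        w -= learning_rate * w.grad  # parameter update: still manual
    w.grad.zero_()  # gradients accumulate, so reset them each step
    if epoch % 10 == 0:
        print(f'epoch {epoch+1}: w = {w.item():.3f}, loss = {l.item():.8f}')
```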
06_03_gradient_torch_loss_optim.py (new file, 52 lines)

@@ -0,0 +1,52 @@
```
# prediction           manual
# gradient computation autograd
# loss computation     pytorch loss      -> the loss function gets replaced by a pytorch function
# parameter update     pytorch optimizer -> the weight update gets replaced by optimizer.step()

import torch
import torch.nn as nn  # neural network module

# linear regression, no bias
# f = w*x
# f = 2*x

X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)  # requires_grad so autograd tracks w

# model prediction
def forward(x):
    return w * x

print(f'Prediction before training: f(5) = {forward(5):.3f}')

# Training
learning_rate = 0.01
n_iters = 100

loss = nn.MSELoss()  # PyTorch's built-in MSE loss function
optimizer = torch.optim.SGD([w], lr=learning_rate)  # built-in optimizer; updates parameter 'w' with the given learning rate

for epoch in range(n_iters):
    # prediction = forward pass
    y_pred = forward(X)

    # loss
    l = loss(Y, y_pred)

    # gradients = backward pass
    l.backward()

    # update weights
    optimizer.step()

    # clear gradients
    optimizer.zero_grad()

    if epoch % 10 == 0:  # every 10th epoch
        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {forward(5):.3f}')
```
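One note on `l = loss(Y, y_pred)`: this runs because mean-squared error is symmetric in its two arguments, but `nn.MSELoss`'s documented signature is `(input, target)`, so `loss(y_pred, Y)` is the conventional order.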
06_04_gradient_torch_model.py (new file, 68 lines)

@@ -0,0 +1,68 @@
```
# prediction           pytorch model -> the forward function gets replaced by a pytorch model
# gradient computation autograd
# loss computation     pytorch loss
# parameter update     pytorch optimizer

import torch
import torch.nn as nn  # neural network module

# linear regression, no bias
# f = w*x
# f = 2*x

X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)  # reshaped to (n_samples, n_features) for the pytorch model
Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
X_test = torch.tensor([5], dtype=torch.float32)

n_samples, n_features = X.shape
print(n_samples, n_features)

input_size = n_features
output_size = n_features

# model = nn.Linear(input_size, output_size, bias=False)

# custom linear regression model (just a wrapper in this case, but you can add more layers)
class LinearRegression(nn.Module):
    def __init__(self, input_dim, output_dim, bias=True):
        super(LinearRegression, self).__init__()
        # define layers
        self.lin = nn.Linear(input_dim, output_dim, bias=bias)

    def forward(self, x):
        return self.lin(x)

model = LinearRegression(input_size, output_size, bias=False)

print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')

# Training
learning_rate = 0.01
n_iters = 100

loss = nn.MSELoss()  # PyTorch's built-in MSE loss function
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # optimizes all model parameters with the given learning rate

for epoch in range(n_iters):
    # prediction = forward pass
    y_pred = model(X)

    # loss
    l = loss(Y, y_pred)

    # gradients = backward pass
    l.backward()

    # update weights
    optimizer.step()

    # clear gradients
    optimizer.zero_grad()

    if epoch % 10 == 0:  # every 10th epoch
        [w] = model.parameters()
        print(f'epoch {epoch+1}: w = {w[0].item():.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
```
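Design note: since `LinearRegression` only wraps a single `nn.Linear`, the commented-out one-liner `model = nn.Linear(input_size, output_size, bias=False)` would train identically here; the class form is just a template for models with more layers.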
06_training pipeline.md (new file, 11 lines)

@@ -0,0 +1,11 @@
```
# Training Pipeline

A training pipeline generally consists of 3 steps:

1. Design the model (input size, output size, forward pass (layers))
2. Construct the loss and optimizer
3. Training loop:
   - forward pass: compute the prediction
   - backward pass: compute the gradients
   - update the parameters

(iterate step 3)
```
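As a sketch, the three steps map onto PyTorch roughly like this (a condensed version of 06_04_gradient_torch_model.py; sizes and hyperparameters are illustrative):

```
import torch
import torch.nn as nn

X = torch.tensor([[1.0], [2.0], [3.0], [4.0]])
Y = torch.tensor([[2.0], [4.0], [6.0], [8.0]])

# 1. design the model
model = nn.Linear(1, 1, bias=False)

# 2. construct the loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# 3. training loop (iterated)
for epoch in range(100):
    y_pred = model(X)          # forward pass: compute prediction
    l = criterion(y_pred, Y)   # loss
    l.backward()               # backward pass: gradient computation
    optimizer.step()           # update parameters
    optimizer.zero_grad()      # clear gradients for the next iteration
```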
README.md (11 lines changed)

````
@@ -10,4 +10,13 @@ pyenv local 3.7.7
 source bin/activate
 ```
 
-video is in directory "Video"
+create venv:
+```
+python -m venv .
+```
+
+install requirements
+```
+python -m pip install --upgrade pip
+pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+```
````
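(The `--extra-index-url https://download.pytorch.org/whl/cu116` flag points pip at the CUDA 11.6 wheel index; it can be dropped for a CPU-only install.)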