From 3ce77417fe00114580d875c7b525d7b0eb8a43c6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Joseph=20Hopfm=C3=BCller?=
Date: Mon, 17 Oct 2022 13:04:51 +0200
Subject: [PATCH] finish chapter 6

---
 .gitattributes                     |  1 -
 05_01_gradient.py                  |  9 +++-
 05_02_gradient_autograd.py         |  9 +++-
 06_01_gradient_torch_loss_optim.py |  0
 06_02_gradient_torch_model.py      |  0
 06_03_gradient_torch_loss_optim.py | 52 +++++++++++++++++++++++
 06_04_gradient_torch_model.py      | 68 ++++++++++++++++++++++++++++++
 06_training pipeline.md            | 11 +++++
 README.md                          | 16 +++++-
 9 files changed, 160 insertions(+), 6 deletions(-)
 delete mode 100644 06_01_gradient_torch_loss_optim.py
 delete mode 100644 06_02_gradient_torch_model.py
 create mode 100644 06_03_gradient_torch_loss_optim.py
 create mode 100644 06_04_gradient_torch_model.py
 create mode 100644 06_training pipeline.md

diff --git a/.gitattributes b/.gitattributes
index f8ff2b5..e69de29 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1 +0,0 @@
-*.mp4 filter=lfs diff=lfs merge=lfs -text
diff --git a/05_01_gradient.py b/05_01_gradient.py
index cd722be..4c15c78 100644
--- a/05_01_gradient.py
+++ b/05_01_gradient.py
@@ -1,9 +1,14 @@
-import numpy as np
+# prediction           manual
+# gradient computation manual
+# loss computation     manual
+# parameter update     manual
 
 # linear regression, no bias
-# f = w*x
 # f = 2*x
 
+import numpy as np
+
+
 X = np.array([1, 2, 3, 4], dtype=np.float32)
 Y = np.array([2, 4, 6, 8], dtype=np.float32)
 
diff --git a/05_02_gradient_autograd.py b/05_02_gradient_autograd.py
index 7cde354..fa7df97 100644
--- a/05_02_gradient_autograd.py
+++ b/05_02_gradient_autograd.py
@@ -1,9 +1,14 @@
-import torch
+# prediction           manual
+# gradient computation autograd -> manual gradient computation gets replaced by backward()
+# loss computation     manual
+# parameter update     manual
 
 # linear regression, no bias
-# f = w*x
 # f = 2*x
 
+import torch
+
+
 X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
 Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
 
diff --git a/06_01_gradient_torch_loss_optim.py b/06_01_gradient_torch_loss_optim.py
deleted file mode 100644
index e69de29..0000000
diff --git a/06_02_gradient_torch_model.py b/06_02_gradient_torch_model.py
deleted file mode 100644
index e69de29..0000000
diff --git a/06_03_gradient_torch_loss_optim.py b/06_03_gradient_torch_loss_optim.py
new file mode 100644
index 0000000..f586204
--- /dev/null
+++ b/06_03_gradient_torch_loss_optim.py
@@ -0,0 +1,52 @@
+# prediction           manual
+# gradient computation autograd
+# loss computation     pytorch loss      -> manual loss function gets replaced by a pytorch loss function
+# parameter update     pytorch optimizer -> manual weight update gets replaced by optimizer.step()
+
+import torch
+import torch.nn as nn  # neural network module
+
+
+# linear regression, no bias
+# f = w*x
+# f = 2*x
+
+X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
+Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
+
+w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)  # requires_grad=True so autograd tracks the gradient of w
+
+# model prediction
+def forward(x):
+    return w*x
+
+print(f'Prediction before training: f(5) = {forward(5):.3f}')
+
+# Training
+learning_rate = 0.01
+n_iters = 100
+
+loss = nn.MSELoss()  # use pytorch's built-in MSE loss function
+optimizer = torch.optim.SGD([w], lr=learning_rate)  # pytorch's built-in optimizer updates parameter 'w' with the given learning rate
+
+
+for epoch in range(n_iters):
+    # prediction = forward pass
+    y_pred = forward(X)
+
+    # loss
+    l = loss(Y, y_pred)
+
+    # gradients = backward pass
+    l.backward()
+
+    # update weights
+    optimizer.step()
+
+    # clear gradients
+    optimizer.zero_grad()
+
+    if epoch % 10 == 0:  # print every 10th epoch
+        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')
+
+print(f'Prediction after training: f(5) = {forward(5):.3f}')
diff --git a/06_04_gradient_torch_model.py b/06_04_gradient_torch_model.py
new file mode 100644
index 0000000..968be8d
--- /dev/null
+++ b/06_04_gradient_torch_model.py
@@ -0,0 +1,68 @@
+# prediction           pytorch model -> manual forward function gets replaced by a pytorch model
+# gradient computation autograd
+# loss computation     pytorch loss
+# parameter update     pytorch optimizer
+
+import torch
+import torch.nn as nn  # neural network module
+
+
+# linear regression, no bias
+# f = w*x
+# f = 2*x
+
+X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)  # reshaped to (n_samples, n_features) as the pytorch model expects
+Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
+X_test = torch.tensor([5], dtype=torch.float32)
+
+n_samples, n_features = X.shape
+print(n_samples, n_features)
+
+input_size = n_features
+output_size = n_features
+
+# model = nn.Linear(input_size, output_size, bias=False)
+
+# custom linear regression model (just a wrapper around nn.Linear in this case, but you can add more layers)
+class LinearRegression(nn.Module):
+    def __init__(self, input_dim, output_dim, bias=True):
+        super(LinearRegression, self).__init__()
+        # define layers
+        self.lin = nn.Linear(input_dim, output_dim, bias)
+
+    def forward(self, x):
+        return self.lin(x)
+
+model = LinearRegression(input_size, output_size, bias=False)
+
+print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')
+
+# Training
+learning_rate = 0.01
+n_iters = 100
+
+loss = nn.MSELoss()  # use pytorch's built-in MSE loss function
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # pytorch's built-in optimizer updates the model parameters with the given learning rate
+
+
+for epoch in range(n_iters):
+    # prediction = forward pass
+    y_pred = model(X)
+
+    # loss
+    l = loss(Y, y_pred)
+
+    # gradients = backward pass
+    l.backward()
+
+    # update weights
+    optimizer.step()
+
+    # clear gradients
+    optimizer.zero_grad()
+
+    if epoch % 10 == 0:  # print every 10th epoch
+        [w] = model.parameters()
+        print(f'epoch {epoch+1}: w = {w[0].item():.3f}, loss = {l:.8f}')
+
+print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
diff --git a/06_training pipeline.md b/06_training pipeline.md
new file mode 100644
index 0000000..4916c77
--- /dev/null
+++ b/06_training pipeline.md
@@ -0,0 +1,11 @@
+# Training Pipeline
+
+A training pipeline generally consists of 3 steps:
+1. Design the model (input size, output size, forward pass (layers))
+2. Construct the loss function and the optimizer
+3. Run the training loop:
+   - forward pass: compute the prediction
+   - backward pass: compute the gradients
+   - update the parameters
+
+(iterate step 3 for the desired number of epochs)
\ No newline at end of file
diff --git a/README.md b/README.md
index 9e74a5f..21500ad 100644
--- a/README.md
+++ b/README.md
@@ -10,4 +10,18 @@
 pyenv local 3.7.7
 source bin/activate
 ```
-video is in directory "Video"
\ No newline at end of file
+create venv:
+```
+python -m venv .
+```
+
+install requirements:
+```
+python -m pip install --upgrade pip
+pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu116
+```
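+
+optionally, verify the install (prints the torch version and whether CUDA is available):
+```
+python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
+```
\ No newline at end of file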