From 34b6999e398da24a802de5176f57b067494edfd5 Mon Sep 17 00:00:00 2001
From: Joseph Hopfmüller
Date: Mon, 17 Oct 2022 00:21:56 +0200
Subject: [PATCH] finished chapter 5

---
 04_backpropagation.py              | 19 ++++++++++++
 05_01_gradient.py                  | 48 ++++++++++++++++++++++++++++++
 05_02_gradient_autograd.py         | 46 ++++++++++++++++++++++++++++
 06_01_gradient_torch_loss_optim.py |  0
 06_02_gradient_torch_model.py      |  0
 5 files changed, 113 insertions(+)
 create mode 100644 05_01_gradient.py
 create mode 100644 05_02_gradient_autograd.py
 create mode 100644 06_01_gradient_torch_loss_optim.py
 create mode 100644 06_02_gradient_torch_model.py

diff --git a/04_backpropagation.py b/04_backpropagation.py
index e69de29..075b7f4 100644
--- a/04_backpropagation.py
+++ b/04_backpropagation.py
@@ -0,0 +1,19 @@
+import torch
+
+x = torch.tensor(1.0)
+y = torch.tensor(2.0)
+
+w = torch.tensor(1.0, requires_grad=True)
+
+# forward pass and compute loss
+y_hat = w*x
+loss = (y_hat - y)**2
+
+print(loss)
+
+# backward pass
+loss.backward()
+print(w.grad)
+
+### update weights (e.g. w -= lr * w.grad, inside torch.no_grad())
+### then repeat the forward and backward passes
\ No newline at end of file
diff --git a/05_01_gradient.py b/05_01_gradient.py
new file mode 100644
index 0000000..cd722be
--- /dev/null
+++ b/05_01_gradient.py
@@ -0,0 +1,48 @@
+import numpy as np
+
+# linear regression, no bias
+# f = w*x
+# target function: f = 2*x
+
+X = np.array([1, 2, 3, 4], dtype=np.float32)
+Y = np.array([2, 4, 6, 8], dtype=np.float32)
+
+w = 0.0
+
+# model prediction
+def forward(x):
+    return w*x
+
+# loss = MSE
+def loss(y, y_pred):
+    return ((y_pred - y)**2).mean()
+
+# gradient
+# MSE = 1/N * sum((w*x - y)**2)
+# dJ/dw = 1/N * sum(2*x * (w*x - y))
+def gradient(x, y, y_pred):
+    return np.dot(2*x, y_pred - y) / len(x)
+
+print(f'Prediction before training: f(5) = {forward(5):.3f}')
+
+# Training
+learning_rate = 0.01
+n_iters = 100
+
+for epoch in range(n_iters):
+    # prediction = forward pass
+    y_pred = forward(X)
+
+    # loss
+    l = loss(Y, y_pred)
+
+    # gradients
+    dw = gradient(X, Y, y_pred)
+
+    # update weights
+    w -= learning_rate*dw
+
+    if epoch % 10 == 0:  # print every 10th epoch
+        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')
+
+print(f'Prediction after training: f(5) = {forward(5):.3f}')
diff --git a/05_02_gradient_autograd.py b/05_02_gradient_autograd.py
new file mode 100644
index 0000000..7cde354
--- /dev/null
+++ b/05_02_gradient_autograd.py
@@ -0,0 +1,46 @@
+import torch
+
+# linear regression, no bias
+# f = w*x
+# target function: f = 2*x
+
+X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
+Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)
+
+w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)  # requires_grad so autograd tracks w
+
+# model prediction
+def forward(x):
+    return w*x
+
+# loss = MSE
+def loss(y, y_pred):
+    return ((y_pred - y)**2).mean()
+
+
+print(f'Prediction before training: f(5) = {forward(5):.3f}')
+
+# Training
+learning_rate = 0.01
+n_iters = 100
+
+for epoch in range(n_iters):
+    # prediction = forward pass
+    y_pred = forward(X)
+
+    # loss
+    l = loss(Y, y_pred)
+
+    # gradients = backward pass
+    l.backward()
+
+    # update weights outside the computation graph
+    with torch.no_grad():
+        w -= learning_rate*w.grad
+
+    w.grad.zero_()  # reset, otherwise backward() accumulates gradients
+
+    if epoch % 10 == 0:  # print every 10th epoch
+        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')
+
+print(f'Prediction after training: f(5) = {forward(5):.3f}')
diff --git a/06_01_gradient_torch_loss_optim.py b/06_01_gradient_torch_loss_optim.py
new file mode 100644
index 0000000..e69de29
diff --git a/06_02_gradient_torch_model.py b/06_02_gradient_torch_model.py
new file mode 100644
index 0000000..e69de29
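
Note (not part of the commit): a quick way to confirm that the hand-derived gradient in 05_01_gradient.py matches what autograd computes in 05_02_gradient_autograd.py is to evaluate both at the same weight. The sketch below reuses the toy data from those scripts; the test point w0 = 0.5 is an arbitrary choice for illustration.

import numpy as np
import torch

# same toy data as 05_01 / 05_02
X = np.array([1, 2, 3, 4], dtype=np.float32)
Y = np.array([2, 4, 6, 8], dtype=np.float32)
w0 = 0.5  # arbitrary test point (assumption, not taken from the scripts)

# hand-derived gradient: dJ/dw = 1/N * sum(2*x * (w*x - y))
manual = np.dot(2*X, w0*X - Y) / len(X)

# autograd gradient of the same MSE loss
w = torch.tensor(w0, requires_grad=True)
l = ((w*torch.from_numpy(X) - torch.from_numpy(Y))**2).mean()
l.backward()

print(manual, w.grad.item())  # both print -22.5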
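
Note: 06_01_gradient_torch_loss_optim.py is created empty in this commit (blob e69de29). Judging by its name, the next chapter replaces the hand-written loss and the manual weight update with torch.nn.MSELoss and torch.optim.SGD. Below is a minimal sketch of what that might look like, assuming the same toy problem; this is an illustration, not the author's chapter-6 code.

import torch
import torch.nn as nn

# assumption: same toy data as chapter 5
X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)

def forward(x):
    return w*x

loss = nn.MSELoss()                        # replaces the hand-written MSE
optimizer = torch.optim.SGD([w], lr=0.01)  # replaces the manual update

for epoch in range(100):
    y_pred = forward(X)
    l = loss(y_pred, Y)
    l.backward()           # compute dl/dw
    optimizer.step()       # performs w -= lr * w.grad
    optimizer.zero_grad()  # replaces w.grad.zero_()
    if epoch % 10 == 0:
        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {forward(5):.3f}')

optimizer.step() does exactly the w -= learning_rate*w.grad update that 05_02 performs inside torch.no_grad(), and optimizer.zero_grad() takes over the gradient reset.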
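
Note: 06_02_gradient_torch_model.py is likewise empty; its name suggests the hand-written forward() is also replaced, presumably by an nn.Linear model. A sketch under the same assumptions follows. nn.Linear expects inputs of shape (n_samples, n_features) and also learns a bias, so the data is reshaped and convergence differs slightly from the bias-free scripts.

import torch
import torch.nn as nn

# assumption: same toy data, reshaped to (n_samples, n_features)
X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)
X_test = torch.tensor([5], dtype=torch.float32)

model = nn.Linear(1, 1)  # replaces forward(); weight and bias are learned
loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')

for epoch in range(100):
    y_pred = model(X)
    l = loss(y_pred, Y)
    l.backward()
    optimizer.step()
    optimizer.zero_grad()
    if epoch % 10 == 0:
        w, b = model.parameters()  # nn.Linear has exactly weight and bias
        print(f'epoch {epoch+1}: w = {w.item():.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')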