# this is a recap
# a training pipeline generally consists of 3 steps:
# 1. Design model (input size, output size, forward pass (layers))
# 2. Construct loss and optimizer
# 3. Training loop
#    - forward pass: compute prediction
#    - backward pass: compute gradients
#    - update parameters
#    (iterate step 3)

import torch
import torch.nn as nn

# linear regression f(x) = w * x, with target w = 3
X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
Y = torch.tensor([[3], [6], [9], [12]], dtype=torch.float32)
X_test = torch.tensor([5], dtype=torch.float32)

n_samples, n_features = X.shape
input_size = output_size = n_features
learning_rate = 0.01
n_iter = 100

# 1. design model: a single linear layer; bias=False because Y = 3 * X exactly
model = nn.Linear(input_size, output_size, bias=False)

# 2. construct loss and optimizer
loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')

# 3. training loop
for epoch in range(n_iter):
    # forward pass: compute prediction and loss
    Y_pred = model(X)
    l = loss(Y_pred, Y)  # nn.MSELoss expects (input, target)

    # backward pass: compute gradients of the loss w.r.t. the parameters
    l.backward()

    # update parameters, then reset gradients for the next iteration
    optimizer.step()
    optimizer.zero_grad()

    if epoch % 10 == 0:
        [w] = model.parameters()  # unpack the single weight tensor
        print(f'Epoch {epoch}: w = {w.item():.3f}, loss = {l.item():.5f}')

print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
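
# Step 1 ("design model") can also be written as a custom nn.Module subclass
# instead of calling nn.Linear directly -- useful once the forward pass has
# more than one layer. A minimal sketch; the class name LinearRegression is
# illustrative and not part of the script above:

class LinearRegression(nn.Module):
    def __init__(self, input_dim, output_dim):
        super().__init__()
        # define layers here; forward() below decides how they are applied
        self.lin = nn.Linear(input_dim, output_dim, bias=False)

    def forward(self, x):
        return self.lin(x)

# drop-in replacement for the nn.Linear model used above:
# model = LinearRegression(input_size, output_size)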