# prediction: the manual forward function gets replaced by a PyTorch model
# gradient computation: autograd
# loss computation: PyTorch loss
# parameter update: PyTorch optimizer

import torch
import torch.nn as nn  # neural network module

# linear regression, no bias
# f = w*x
# f = 2*x
X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)  # reshaped to (n_samples, n_features) for the PyTorch model
Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)

X_test = torch.tensor([5], dtype=torch.float32)

n_samples, n_features = X.shape
print(n_samples, n_features)

input_size = n_features
output_size = n_features

# model = nn.Linear(input_size, output_size, bias=False)

# custom linear regression model (just a wrapper around nn.Linear in this case,
# but you can add more layers)
class LinearRegression(nn.Module):

    def __init__(self, input_dim, output_dim, bias=True):
        super(LinearRegression, self).__init__()
        # define layers
        self.lin = nn.Linear(input_dim, output_dim, bias=bias)

    def forward(self, x):
        return self.lin(x)

model = LinearRegression(input_size, output_size, bias=False)

print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')

# Training
learning_rate = 0.01
n_iters = 100

loss = nn.MSELoss()  # use PyTorch's built-in MSE loss function
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # built-in SGD optimizer updates the model's parameters with the given learning rate

for epoch in range(n_iters):
    # prediction = forward pass
    y_pred = model(X)

    # loss
    l = loss(Y, y_pred)

    # gradients = backward pass
    l.backward()

    # update weights
    optimizer.step()

    # clear gradients
    optimizer.zero_grad()

    if epoch % 10 == 0:  # every 10th epoch
        [w] = model.parameters()
        print(f'epoch {epoch+1}: w = {w[0].item():.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')
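
# Optional follow-up (not part of the original script): a minimal sketch of how one
# might inspect the learned weight and run a final prediction without autograd
# bookkeeping. `model.lin` refers to the nn.Linear layer defined in the class above;
# torch.no_grad() simply disables gradient tracking for these calls.
with torch.no_grad():
    learned_w = model.lin.weight          # (1, 1) weight tensor, should approach 2.0
    print(f'learned w = {learned_w.item():.3f}')
    print(f'f(5) with no_grad = {model(X_test).item():.3f}')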