finish 09
07_linear_regression.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# this is a recap

# a training pipeline generally consists of 3 steps:
# 1. Design the model (input size, output size, forward pass / layers)
# 2. Construct the loss and the optimizer
# 3. Training loop:
#    - forward pass: compute the prediction
#    - backward pass: compute the gradients
#    - update the parameters
#
# (iterate step 3)

import torch
import torch.nn as nn
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt

# 0. data preparation
# make_regression returns X with shape (n_samples, n_features) and y with shape (n_samples,)
X_numpy, Y_numpy = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=1)

X = torch.from_numpy(X_numpy.astype(np.float32))
y = torch.from_numpy(Y_numpy.astype(np.float32))
y = y.view(y.shape[0], 1)  # reshape y into a column vector of shape (n_samples, 1) to match the model output

n_samples, n_features = X.shape

# 1. model
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size)

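# note: nn.Linear(1, 1) holds a single learnable weight and a single bias,
# so the forward pass computes f(x) = w*x + b, i.e. plain linear regression
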
# 2. loss and optimizer
learning_rate = 0.01
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

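# note: nn.MSELoss() averages the squared errors, mean((y_pred - y)**2),
# and plain SGD updates every parameter as p <- p - learning_rate * p.grad
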
# 3. training loop
n_epochs = 10000
for epoch in range(n_epochs):
    # forward pass and loss
    y_pred = model(X)
    loss = criterion(y_pred, y)  # nn.MSELoss takes (input, target)

    # backward pass
    loss.backward()

    # update, then reset the gradients (they accumulate across backward() calls)
    optimizer.step()
    optimizer.zero_grad()

    # print the loss 10 times over the whole run
    if (epoch + 1) % (n_epochs // 10) == 0:
        print(f'Epoch {epoch+1}: loss = {loss.item():.4f}')

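# for reference only (not part of the original script): with plain SGD
# (no momentum, no weight decay), optimizer.step() amounts to this manual update:
#
#     with torch.no_grad():
#         for p in model.parameters():
#             p -= learning_rate * p.grad
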
# plot the data and the learned regression line
predicted = model(X).detach().numpy()  # detach from the graph before converting to numpy
plt.plot(X_numpy, Y_numpy, 'ro')
plt.plot(X_numpy, predicted, 'b')
plt.show()
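
# optional check (not in the original script): read out the learned parameters;
# for nn.Linear(1, 1) both weight and bias are single-element tensors, so .item() works
w = model.weight.item()
b = model.bias.item()
print(f'learned line: y = {w:.3f} * x + {b:.3f}')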