Linear Model with PyTorch

  • The goal of this post is to go a step beyond the previous Linear Regression post and fit a variety of regression examples in linear model (WX) form using PyTorch.
  • Along the way, we will define models as classes and use PyTorch's built-in loss functions.
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
%config InlineBackend.figure_format = 'retina'
%matplotlib inline

1. Quadratic Regression Model

$$
f(x) = w_0 + w_1x + w_2x^2
$$

x = np.linspace(-10, 10, 100)
y = x**2 + 0.7 * x + 3.0 + 20 * np.random.rand(len(x))

plt.plot(x, y, 'o')
plt.grid()
plt.xlabel('x')
plt.ylabel('y')
plt.show()

[Figure: scatter plot of the noisy quadratic training data]

x_train = torch.FloatTensor([[each_x**2, each_x, 1] for each_x in x])
y_train = torch.FloatTensor(y)

print("x_train shape: ", x_train.shape)
print("y_train shape: ", y_train.shape)
x_train shape:  torch.Size([100, 3])
y_train shape:  torch.Size([100])
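The comprehension above builds the design matrix one row at a time. As a side note, an equivalent vectorized sketch (the names x_t and x_train_alt are just for illustration) stacks the feature columns directly:

# Vectorized alternative, equivalent to the comprehension above:
# build the [x^2, x, 1] columns with torch.stack instead of a Python loop.
x_t = torch.from_numpy(x).float()
x_train_alt = torch.stack([x_t**2, x_t, torch.ones_like(x_t)], dim=1)
print(x_train_alt.shape)  # torch.Size([100, 3])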
W = torch.zeros(3, requires_grad=True)

optimizer = optim.SGD([W], lr=0.0001)

epochs = 10000

for epoch in range(1, epochs + 1):
    # prediction: XW
    hypothesis = x_train.matmul(W)

    # mean squared error loss
    loss = torch.mean((hypothesis - y_train) ** 2)

    # backprop & update parameters
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print("epoch: {} -- Parameters: W: {} -- loss {}".format(epoch, W.data, loss.data))
epoch: 1000 -- Parameters: W: tensor([1.1738, 0.4943, 1.0699]) -- loss 85.30189514160156
epoch: 2000 -- Parameters: W: tensor([1.1581, 0.4949, 2.0311]) -- loss 76.05414581298828
epoch: 3000 -- Parameters: W: tensor([1.1437, 0.4949, 2.9105]) -- loss 68.31205749511719
epoch: 4000 -- Parameters: W: tensor([1.1305, 0.4949, 3.7151]) -- loss 61.83049011230469
epoch: 5000 -- Parameters: W: tensor([1.1185, 0.4949, 4.4514]) -- loss 56.4041862487793
epoch: 6000 -- Parameters: W: tensor([1.1075, 0.4949, 5.1250]) -- loss 51.86140060424805
epoch: 7000 -- Parameters: W: tensor([1.0974, 0.4949, 5.7414]) -- loss 48.058231353759766
epoch: 8000 -- Parameters: W: tensor([1.0882, 0.4949, 6.3054]) -- loss 44.8742790222168
epoch: 9000 -- Parameters: W: tensor([1.0798, 0.4949, 6.8214]) -- loss 42.20869445800781
epoch: 10000 -- Parameters: W: tensor([1.0721, 0.4949, 7.2935]) -- loss 39.97709655761719
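The loss is still falling at epoch 10000, for two reasons. First, the best achievable MSE is bounded below by the noise: uniform noise on [0, 20] has variance 400/12 ≈ 33.3, so the loss cannot go much under 33. Second, the x² column spans up to 100 while the x column only spans up to 10, and this scale mismatch forces a very small learning rate on plain SGD. One common remedy, sketched here as an optional variation rather than part of the original recipe, is to standardize the non-constant feature columns first:

# Optional variation: standardize the x^2 and x columns so SGD can use a
# larger learning rate; the ones column is kept as-is to preserve the intercept.
cols = x_train[:, :2]
cols = (cols - cols.mean(dim=0)) / cols.std(dim=0)
x_train_scaled = torch.cat([cols, x_train[:, 2:]], dim=1)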
plt.plot(x, y, 'o', label='train data')
plt.plot(x, (x_train.data.matmul(W.data).numpy()), '-r', linewidth=3, label='fitted')
plt.grid()
plt.legend()
plt.show()

[Figure: training data with the fitted quadratic curve]
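As a quick sanity check (an addition, not in the original notebook), numpy's closed-form least squares should give coefficients near the generating values: w2 ≈ 1, w1 ≈ 0.7, and an intercept near 13, since the uniform noise 20 * rand has mean 10 on top of the constant 3.

# Closed-form polynomial least squares, for comparison with the SGD fit
coeffs = np.polyfit(x, y, deg=2)   # returns [w2, w1, w0], highest degree first
print(coeffs)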

2. Cubic Regression Model

$$
f(x) = w_0 + w_1x + w_2x^2 + w_3x^3
$$

2.1 Generate Toy data

  • Generate 100 data points.
x = np.linspace(-1, 1, 100)
y = 3*x**3 - 0.2 * x ** 2 + 0.7 * x + 3 + 0.5 * np.random.rand(len(x))
plt.plot(x, y, 'o')
plt.grid()
plt.xlabel('x')
plt.ylabel('y')
plt.show()

[Figure: scatter plot of the noisy cubic training data]

2.2 Define Model

  • Build x_train and y_train.
x_train = torch.FloatTensor([[xval**3, xval**2, xval, 1] for xval in x])
y_train = torch.FloatTensor([y]).view(100, -1)
y_train.shape
torch.Size([100, 1])
  • This time, let's define the model as a class that inherits from the nn.Module abstract class.
class CubicModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 1)

    def forward(self, x):
        return self.linear(x)
model = CubicModel()
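One detail worth noting: nn.Linear(4, 1) already has its own bias term, so together with the column of ones in x_train the intercept is represented twice. Training still works, but an equivalent sketch (CubicModelNoOnes is a hypothetical name) drops the ones column and lets the layer's bias carry w0; alternatively, keep the ones column and pass bias=False.

# Hypothetical variant: the layer's built-in bias serves as w0,
# so the design matrix would only need the [x^3, x^2, x] columns.
class CubicModelNoOnes(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 1)  # bias=True by default

    def forward(self, x):
        return self.linear(x)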
  • For training, we'll use the built-in MSE loss from nn.functional this time.
optimizer = optim.SGD(model.parameters(), lr=0.001)

epochs = 15000

for epoch in range(1, epochs + 1):
    hypothesis = model(x_train)

    # define loss
    loss = F.mse_loss(hypothesis, y_train)

    # Backprop & update parameters
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1500 == 0:
        print("epoch: {} -- loss {}".format(epoch, loss.data))


epoch: 1500 -- loss 0.22306101024150848
epoch: 3000 -- loss 0.11560291796922684
epoch: 4500 -- loss 0.09848319739103317
epoch: 6000 -- loss 0.08879078179597855
epoch: 7500 -- loss 0.08104882389307022
epoch: 9000 -- loss 0.07452096790075302
epoch: 10500 -- loss 0.06889640539884567
epoch: 12000 -- loss 0.06398065388202667
epoch: 13500 -- loss 0.05964164435863495
epoch: 15000 -- loss 0.055785566568374634
plt.plot(x, y, 'o', label='train data')
plt.plot(x, model(x_train).data.numpy(), '-r', linewidth=3, label='fitted')
plt.grid()
plt.legend()
plt.show()

[Figure: training data with the fitted cubic curve]
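To inspect the fitted coefficients (a small addition; the exact numbers depend on the noise and the random initialization):

# weight holds the coefficients for [x^3, x^2, x, 1]; the effective
# intercept is the ones-column weight plus the layer's own bias.
print(model.linear.weight.data)
print(model.linear.bias.data)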

3. Exponential Regression Model

$$
f(x) = e^{w_0x}
$$

$$
g(x) = \ln f(x) = w_0x
$$

  • For the exponential model, to obtain a linear form we take the log of both sides, train on log(y), and then apply the exponential to the prediction to map it back.

3.1 Generate Toy data

np.random.seed(20190505)
x = np.linspace(-1, 1, 50)
y = np.exp(2 * x) + 0.2 * (2 * np.random.rand(len(x)) - 1)
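Since training will happen on log(y), the targets must be strictly positive. With this seed the noise (at most ±0.2) never drives y below zero, but it is worth verifying before the transform:

# log(y) is only defined for y > 0; check before taking the log
assert (y > 0).all(), "log transform requires strictly positive targets"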
plt.plot(x, y, 'o')
plt.grid()
plt.xlabel('x')
plt.ylabel('y')
plt.show()

[Figure: scatter plot of the noisy exponential training data]

3.2 Define Model

x_train = torch.FloatTensor([[xval, 1] for xval in x])
y_train = torch.FloatTensor([np.log(y)]).view(50, -1)
y_train.shape
torch.Size([50, 1])
class ExpModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 1)

    def forward(self, x):
        return self.linear(x)
  • This time, let's use Adam as the optimization algorithm.
  • Adam adaptively adjusts the learning rate; its default hyperparameters are spelled out in the sketch below.
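Called with only the parameters, as in the next cell, Adam uses its PyTorch defaults; written out explicitly, the equivalent call looks like this (model here refers to the ExpModel instance created below):

# Equivalent to optim.Adam(model.parameters()) with the defaults spelled out
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-8)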
model = ExpModel()

optimizer = optim.Adam(model.parameters())

epochs = 15000

for epoch in range(1, epochs + 1):
    hypothesis = model(x_train)

    loss = F.mse_loss(hypothesis, y_train)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print("epoch: {} -- loss {}".format(epoch, loss.data))
epoch: 1000 -- loss 1.4807509183883667
epoch: 2000 -- loss 0.6568859815597534
epoch: 3000 -- loss 0.2930431365966797
epoch: 4000 -- loss 0.1839657723903656
epoch: 5000 -- loss 0.1683545857667923
epoch: 6000 -- loss 0.16775406897068024
epoch: 7000 -- loss 0.16775156557559967
epoch: 8000 -- loss 0.16775153577327728
epoch: 9000 -- loss 0.16775155067443848
epoch: 10000 -- loss 0.16775155067443848
epoch: 11000 -- loss 0.16775155067443848
epoch: 12000 -- loss 0.16775155067443848
epoch: 13000 -- loss 0.16775153577327728
epoch: 14000 -- loss 0.1677515208721161
epoch: 15000 -- loss 0.1677515208721161
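The plateaued loss above is MSE in log space, since that is what the optimizer sees. To check the error of the final model back in the original space (a small addition, not in the original post):

# MSE after mapping predictions back through exp, for comparison
with torch.no_grad():
    y_pred = torch.exp(model(x_train))
    print(F.mse_loss(y_pred, torch.FloatTensor(y).view(50, -1)))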
plt.plot(x, y, 'o', label='train data')
plt.plot(x, np.exp(model(x_train).data.numpy()), '-r', linewidth=3, label='fitted')
plt.grid()
plt.legend()
plt.show()

[Figure: training data with the fitted exponential curve]

4. Sine & Cosine Regression

$$
f(x) = w_0\cos(\pi x) + w_1\sin(\pi x)
$$

4.1 Generate Toy data

x = np.linspace(-2, 2, 100)
y = 2 * np.cos(np.pi * x) + 1.5 * np.sin(np.pi * x) + 2 * np.random.rand(len(x)) - 1
plt.plot(x, y, 'o')
plt.grid()
plt.xlabel('x')
plt.ylabel('y')
plt.show()

[Figure: scatter plot of the noisy sine-cosine training data]

4.2 Modeling

x_train = torch.FloatTensor([[np.cos(np.pi*xval), np.sin(np.pi*xval), 1] for xval in x])
y_train = torch.FloatTensor(y).view(100, -1)
y_train.shape
torch.Size([100, 1])
class SinCosModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 1)

    def forward(self, x):
        return self.linear(x)
model = SinCosModel()

optimizer = optim.Adam(model.parameters())

epochs = 10000

for epoch in range(1, epochs + 1):
    hypothesis = model(x_train)

    loss = F.mse_loss(hypothesis, y_train)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if epoch % 1000 == 0:
        print("epoch: {} -- loss {}".format(epoch, loss.data))
epoch: 1000 -- loss 1.1333037614822388
epoch: 2000 -- loss 0.45972707867622375
epoch: 3000 -- loss 0.36056602001190186
epoch: 4000 -- loss 0.3566252291202545
epoch: 5000 -- loss 0.3566077649593353
epoch: 6000 -- loss 0.3566077947616577
epoch: 7000 -- loss 0.3566077649593353
epoch: 8000 -- loss 0.3566077947616577
epoch: 9000 -- loss 0.3566077947616577
epoch: 10000 -- loss 0.3566077649593353
plt.plot(x, y, 'o', label='train data')
plt.plot(x, model(x_train).data.numpy(), '-r', linewidth=3, label='fitted')
plt.grid()
plt.legend()
plt.show()

[Figure: training data with the fitted sine-cosine curve]
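The plateaued loss of about 0.357 is close to the variance of the uniform noise on [-1, 1], which is 1/3, so the fit has essentially converged. Reading the learned parameters back out (for inspection; values vary with the noise), the weights on the cosine and sine features should land near the generating values 2 and 1.5, while the intercept terms should be near 0 because the noise 2 * rand - 1 has zero mean:

# weight holds the coefficients for [cos(pi x), sin(pi x), 1]; the effective
# intercept is the ones-column weight plus the layer's bias.
print(model.linear.weight.data)  # expect roughly [2.0, 1.5, ...]
print(model.linear.bias.data)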

Author: Emjay Ahn
Posted on 2019-05-04 · Updated on 2019-07-17