Creating a Neural Network Class
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
Inherit from nn.Module to define the neural network (NN) architecture, optimizer, and loss function. The optimizer adjusts the neural network parameters to minimize the loss computed by the loss function.
class Model(nn.Module):
    def __init__(self, input_dim, output_dim):
        super().__init__()
        # NN layers: two hidden layers of 100 units each
        self.fc1 = nn.Linear(input_dim, 100)
        self.fc2 = nn.Linear(100, 100)
        self.fc3 = nn.Linear(100, output_dim)
        # Parameter update method: SGD with learning rate 0.01
        self.optimizer = optim.SGD(self.parameters(), lr=0.01)
        # Loss function: mean squared error
        self.criterion = nn.MSELoss()
Define the forward pass, applying a ReLU activation after each hidden layer and leaving the output layer linear:
    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # no activation on the output layer (regression)
        return x
Compute gradients via backpropagation and update parameters with the optimizer:
    def update(self, output, y):
        self.optimizer.zero_grad()  # clear gradients from the previous step
        loss = self.criterion(output, y)
        # Compute gradients via backpropagation
        loss.backward()
        # Update parameters
        self.optimizer.step()
        return loss
Experiment
# Create training and test data from the target function y = x^2 + 2x + 1
def math(x):
    return x * x + 2 * x + 1

def make():
    x = np.random.rand(1000)  # 1000 samples drawn uniformly from [0, 1)
    y = math(x)
    return x, y

train_x, train_y = make()
test_x, test_y = make()

model = Model(1, 1)  # one input feature, one output value
# Evaluate on test data before training
gosa = 0
for i in range(1000):
output = model(torch.tensor([[float(test_x[i])]]))
gosa += abs((output - test_y[i]))
print("Before training:", gosa/1000)
# Train on the training data, one sample at a time
for i in range(1000):
    output = model(torch.tensor([[float(train_x[i])]]))
    loss = model.update(output, torch.tensor([[float(train_y[i])]]))
# Evaluate on test data after training
gosa = 0
for i in range(1000):
    output = model(torch.tensor([[float(test_x[i])]]))
    gosa += abs((output - test_y[i])[0])
print("After training:", gosa/1000)
Output:
Before training: tensor([[2.4048]], grad_fn=<DivBackward0>)
After training: tensor([0.0174], grad_fn=<DivBackward0>)