Here is how to use PyTorch to create and train a simple neural network on a dataset.
import numpy as np
import matplotlib.pyplot as plt
import torch
import dataset   # local module that provides the Annuli dataset
import copy
We will use PyTorch's Dataset class.
from torch.utils.data import Dataset, DataLoader
# ptAnnuli is a torch.utils.data.Dataset wrapper for dataset.Annuli.
# It allows us to use Annuli as a PyTorch Dataset.
class ptAnnuli(Dataset):
    def __init__(self, n=300, noise=1.):
        self.np_ds = dataset.Annuli(n=n, noise=noise)
    def __getitem__(self, idx):
        x = self.np_ds.X[idx]
        t = self.np_ds.T[idx]
        return torch.tensor(x, dtype=torch.float), torch.tensor(t, dtype=torch.float)
    def __len__(self):
        return len(self.np_ds.samples)
    def inputs(self):
        return torch.tensor(self.np_ds.X, dtype=torch.float)
    def targets(self):
        # Integer (long) class labels, as required by NLLLoss/CrossEntropyLoss
        return torch.tensor(self.np_ds.T, dtype=torch.long).squeeze()
    def plot(self, labels=None, **kwargs):
        if labels is None:
            self.np_ds.plot(**kwargs)
        else:
            labels = labels.detach().numpy()
            self.np_ds.plot(labels=labels, **kwargs)
ds = ptAnnuli(n=1000, noise=1.)
ds.plot()
print(ds.inputs()[:5])
tensor([[ 0.5858, -0.1610],
        [ 0.0578, -0.1476],
        [-0.5139,  0.3168],
        [ 0.0262,  0.0137],
        [-0.0068, -0.0130]])
print(ds.targets()[:5])
tensor([1, 0, 1, 0, 0])
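We train full-batch below, but the Dataset interface also lets a DataLoader draw shuffled mini-batches. A minimal sketch (the batch size of 32 is an arbitrary choice):
loader = DataLoader(ds, batch_size=32, shuffle=True)
xb, tb = next(iter(loader))
print(xb.shape, tb.shape)   # e.g. torch.Size([32, 2]) torch.Size([32, 1])
# Note: __getitem__ returns float targets; cast with tb.squeeze().long()
# before passing them to NLLLoss.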
Subclassing torch.nn.Module gives more flexibility, since you get to specify the forward function yourself.
class mynet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # self.lyr = [] DOES NOT WORK: a plain Python list does not register
        # the layers' parameters with the module; ModuleList does.
        self.lyr = torch.nn.ModuleList()
        self.lyr.append(torch.nn.Linear(2, 10, bias=True))
        self.lyr.append(torch.nn.Sigmoid())
        self.lyr.append(torch.nn.Linear(10, 3))
        #self.lyr.append(torch.nn.Softmax(dim=1))
        self.lyr.append(torch.nn.LogSoftmax(dim=1))  # outputs log-probabilities, pairs with NLLLoss
    def forward(self, x):
        y = x
        for l in self.lyr:
            y = l(y)
        return y
net = mynet() # <=== Create the network model
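Printing the model is a quick sanity check that all the layers were registered; a sketch:
print(net)   # lists the four layers stored in the ModuleList
print(sum(p.numel() for p in net.parameters()))   # 2*10+10 + 10*3+3 = 63 parameters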
Choose a loss function.
#loss_fcn = torch.nn.CrossEntropyLoss(reduction='mean')  # for raw scores (logits)
loss_fcn = torch.nn.NLLLoss(reduction='mean')   # <=== Choose a cost function; expects log-probabilities
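The two choices are equivalent: CrossEntropyLoss applies LogSoftmax internally and so expects raw scores, while NLLLoss expects the log-probabilities our network already produces. A quick sketch on dummy data to confirm:
scores = torch.randn(4, 3)                # raw scores for 4 samples, 3 classes
labels = torch.tensor([0, 2, 1, 0])
ce = torch.nn.CrossEntropyLoss()(scores, labels)
nll = torch.nn.NLLLoss()(torch.nn.LogSoftmax(dim=1)(scores), labels)
print(torch.allclose(ce, nll))            # True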
x = ds.inputs()
classes = ds.targets()
kappa = 1.        # learning rate
n_epochs = 1000
losses = []
for epoch in range(n_epochs):
    # Forward pass (full batch)
    y = net(x)
    # Calculate the loss
    err = loss_fcn(y, classes)   # negative log-likelihood of the correct classes
    losses.append(err.item())
    # Backpropagate the gradient of the loss
    net.zero_grad()
    err.backward()
    # Increment the weights and biases (one gradient-descent step)
    with torch.no_grad():
        for p in net.parameters():
            p -= kappa * p.grad
plt.plot(losses);
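The manual update above is plain gradient descent; the same loop is usually written with torch.optim instead. A sketch of the equivalent version (run it on a freshly created net, since training here would continue from the current weights):
optimizer = torch.optim.SGD(net.parameters(), lr=kappa)
for epoch in range(n_epochs):
    y = net(x)
    err = loss_fcn(y, classes)
    optimizer.zero_grad()    # replaces net.zero_grad()
    err.backward()
    optimizer.step()         # replaces the manual p -= kappa * p.grad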
y = net(x)
print(y[:5])
print(torch.exp(y[:5]))
tensor([[-2.9053, -0.8127, -0.6900],
        [-0.2509, -1.5736, -4.2254],
        [-3.3338, -0.8433, -0.6273],
        [-0.1800, -1.8490, -4.9116],
        [-0.1750, -1.8747, -4.9397]], grad_fn=<SliceBackward0>)
tensor([[0.0547, 0.4437, 0.5016],
        [0.7781, 0.2073, 0.0146],
        [0.0357, 0.4303, 0.5340],
        [0.8352, 0.1574, 0.0074],
        [0.8394, 0.1534, 0.0072]], grad_fn=<ExpBackward0>)
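Each row of torch.exp(y) is a probability distribution over the three classes, so the rows should sum to 1 (up to rounding):
print(torch.exp(y).sum(dim=1)[:5])   # approximately all ones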
classes[:5]
tensor([1, 0, 1, 0, 0])
ds.plot(labels=y)
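Since each row of y holds log-probabilities, the predicted class is its argmax; comparing predictions with the targets gives the training accuracy. A minimal sketch:
pred = torch.argmax(y, dim=1)                   # predicted class per sample
acc = (pred == classes).float().mean().item()   # fraction classified correctly
print(acc)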