.. _200PerceptronMNISTrst:

====================================
200 - First perceptron with pytorch
====================================

.. only:: html

    **Links:** :download:`notebook <200_Perceptron_MNIST.ipynb>`,
    :downloadlink:`html <200_Perceptron_MNIST2html.html>`,
    :download:`PDF <200_Perceptron_MNIST.pdf>`,
    :download:`python <200_Perceptron_MNIST.py>`,
    :downloadlink:`slides <200_Perceptron_MNIST.slides.html>`,
    :githublink:`GitHub|_doc/notebooks/101/200_Perceptron_MNIST.ipynb|*`

First perceptron on the MNIST database.

**Note:** install `tqdm <https://github.com/tqdm/tqdm>`__ if it is not
installed: ``!pip install tqdm``

.. code:: ipython3

    import time
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    print("torch", torch.__version__)
    from torchvision import datasets, transforms
    from tqdm import tqdm

.. parsed-literal::

    torch 1.5.0+cpu

.. code:: ipython3

    %matplotlib inline

.. code:: ipython3

    BATCH_SIZE = 64
    TEST_BATCH_SIZE = 64
    DATA_DIR = 'data/'
    USE_CUDA = True
    N_EPOCHS = 100

.. code:: ipython3

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(DATA_DIR, train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=BATCH_SIZE, shuffle=True)

    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(DATA_DIR, train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=TEST_BATCH_SIZE, shuffle=True)

.. code:: ipython3

    data, target = next(iter(train_loader))

.. code:: ipython3

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.fc1 = nn.Linear(28*28, 50)
            self.fc2 = nn.Linear(50, 10)

        def forward(self, x):
            x = x.view(-1, 28*28)
            x = torch.tanh(self.fc1(x))
            x = F.dropout(x, training=self.training)
            x = self.fc2(x)
            return F.log_softmax(x, dim=-1)

.. code:: ipython3

    model = Net()
    if USE_CUDA:
        try:
            model = model.cuda()
        except Exception as e:
            print(e)
            USE_CUDA = False
            N_EPOCHS = 5

.. parsed-literal::

    Torch not compiled with CUDA enabled

.. code:: ipython3

    optimizer = optim.Adam(model.parameters())

.. code:: ipython3

    def train(epoch, verbose=True):
        model.train()
        losses = []
        loader = tqdm(train_loader, total=len(train_loader))
        for batch_idx, (data, target) in enumerate(loader):
            if USE_CUDA:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            if verbose and batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
        return np.mean(losses)
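The ``train`` function uses ``F.nll_loss`` because the network already
returns log-probabilities. A minimal sketch of how this pairing relates
to cross-entropy, with made-up tensor values (this cell is illustrative
and not from the recorded run): ``log_softmax`` followed by ``nll_loss``
computes the same value as ``F.cross_entropy`` applied to raw logits.

.. code:: ipython3

    logits = torch.randn(4, 10)           # hypothetical batch: 4 samples, 10 classes
    targets = torch.tensor([3, 1, 0, 7])  # hypothetical labels
    loss_a = F.nll_loss(F.log_softmax(logits, dim=-1), targets)
    loss_b = F.cross_entropy(logits, targets)
    print(torch.allclose(loss_a, loss_b))  # expected: True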
.. code:: ipython3

    def test(verbose=True):
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in test_loader:
                if USE_CUDA:
                    data, target = data.cuda(), target.cuda()
                output = model(data)
                test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
                pred = output.argmax(dim=1, keepdim=True)  # index of the max log-probability
                correct += pred.eq(target.view_as(pred)).cpu().sum().item()
        test_loss /= len(test_loader.dataset)
        if verbose:
            print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                test_loss, correct, len(test_loader.dataset),
                100. * correct / len(test_loader.dataset)))
        return [float(test_loss), correct]

.. code:: ipython3

    perfs = []
    for epoch in range(1, N_EPOCHS + 1):
        t0 = time.time()
        train_loss = train(epoch, verbose=False)
        test_loss, correct = test(verbose=False)
        perfs.append([epoch, train_loss, test_loss, correct,
                      len(test_loader.dataset), time.time() - t0])
        print("epoch {}: train loss {:.4f}, test loss {:.4f}, accuracy {}/{} in {:.2f}s".format(
            *perfs[-1]))

.. parsed-literal::

    100%|██████████| 938/938 [00:18<00:00, 52.70it/s]

.. parsed-literal::

    epoch 1: train loss 0.4866, test loss 0.2546, accuracy 9259/10000 in 20.77s

.. parsed-literal::

    100%|██████████| 938/938 [00:18<00:00, 51.27it/s]

.. parsed-literal::

    epoch 2: train loss 0.3374, test loss 0.2236, accuracy 9337/10000 in 20.57s

.. parsed-literal::

    100%|██████████| 938/938 [00:18<00:00, 51.55it/s]

.. parsed-literal::

    epoch 3: train loss 0.3091, test loss 0.2040, accuracy 9392/10000 in 20.66s

.. parsed-literal::

    100%|██████████| 938/938 [00:18<00:00, 50.80it/s]

.. parsed-literal::

    epoch 4: train loss 0.2951, test loss 0.2035, accuracy 9382/10000 in 20.70s

.. parsed-literal::

    100%|██████████| 938/938 [00:18<00:00, 50.85it/s]

.. parsed-literal::

    epoch 5: train loss 0.2848, test loss 0.1877, accuracy 9439/10000 in 20.77s
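Once trained, the weights can be saved to disk and reloaded later. A
minimal sketch following the usual PyTorch ``state_dict`` pattern (the
file name ``perceptron_mnist.pt`` is an arbitrary choice):

.. code:: ipython3

    # Persist only the learned parameters.
    torch.save(model.state_dict(), "perceptron_mnist.pt")

    # Reload them into a fresh instance of the same architecture.
    model2 = Net()
    model2.load_state_dict(torch.load("perceptron_mnist.pt"))
    model2.eval()  # disables dropout for inference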
.. code:: ipython3

    df_perfs = pd.DataFrame(perfs, columns=["epoch", "train_loss", "test_loss",
                                            "accuracy", "n_test", "time"])
    df_perfs

.. parsed-literal::

       epoch  train_loss  test_loss  accuracy  n_test       time
    0      1    0.486636   0.254567      9259   10000  20.770426
    1      2    0.337395   0.223594      9337   10000  20.567967
    2      3    0.309106   0.204043      9392   10000  20.660723
    3      4    0.295136   0.203547      9382   10000  20.696622
    4      5    0.284773   0.187705      9439   10000  20.769429
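The raw counts can also be converted into an accuracy percentage before
plotting. A small sketch (the column name ``acc_pct`` is made up for
this illustration):

.. code:: ipython3

    # Accuracy in percent, derived from the correct counts and the test set size.
    df_perfs["acc_pct"] = 100.0 * df_perfs["accuracy"] / df_perfs["n_test"]
    df_perfs[["epoch", "acc_pct"]]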
.. code:: ipython3

    df_perfs[["train_loss", "test_loss"]].plot();

.. image:: 200_Perceptron_MNIST_14_0.png

.. code:: ipython3

    df_perfs[["train_loss", "test_loss"]].plot(ylim=(0, 0.2));

.. image:: 200_Perceptron_MNIST_15_0.png
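To apply the trained network to a single image, dropout must be turned
off with ``model.eval()`` and gradient tracking disabled. A minimal
inference sketch, assuming the ``data`` and ``target`` batch grabbed
from ``train_loader`` earlier is still in scope:

.. code:: ipython3

    model.eval()
    with torch.no_grad():
        image = data[:1]                       # one normalized 1x28x28 image
        if USE_CUDA:
            image = image.cuda()
        log_probs = model(image)               # shape (1, 10): log-probabilities
        prediction = log_probs.argmax(dim=1)   # most likely digit
    print("predicted digit:", prediction.item(), "- true label:", target[0].item())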