Classifying the MNIST Handwritten Digit Dataset with PyTorch


We classify the MNIST dataset with a convolutional neural network to recognize the handwritten digits 0–9, which is a standard introductory exercise for convolutional neural networks. It covers (1) loading the data, (2) building the model, (3) training and saving the model, and (4) loading the saved model and testing it. The full code is as follows.
import torch
import torch.nn
import torch.utils.data
import torchvision.datasets
import torchvision.transforms
import matplotlib.pyplot as plt 

#read the data
train_dataset = torchvision.datasets.MNIST(root='./data/mnist',
                                           train=True,
                                           transform=torchvision.transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='./data/mnist',
                                           train=False,
                                           transform=torchvision.transforms.ToTensor(),
                                           download=True)

batch_size = 100
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True   #shuffle the training data each epoch
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size
)
print('len(train_loader)={}'.format(len(train_loader)))
print('len(test_loader)={}'.format(len(test_loader)))
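
#matplotlib.pyplot is imported above but never used in the original listing;
#a minimal sketch of how it could be used to inspect one training sample
#(illustrative, not part of the original code):
sample_image, sample_label = train_dataset[0]             #a 1x28x28 tensor and its integer label
plt.imshow(sample_image.squeeze().numpy(), cmap='gray')   #drop the channel dimension for plotting
plt.title('label = {}'.format(sample_label))
plt.show()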

#define the Net Structure
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()   
        self.conv0 = torch.nn.Conv2d(1, 64, kernel_size=3, padding=1)
        self.relu1 = torch.nn.ReLU()
        self.conv2 = torch.nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.relu3 = torch.nn.ReLU()
        self.pool4 = torch.nn.MaxPool2d(stride=2, kernel_size=2)
        self.fc5 = torch.nn.Linear(128*14*14, 1024)
        self.relu6 = torch.nn.ReLU()
        self.drop7 = torch.nn.Dropout(p=0.5)
        self.fc8 = torch.nn.Linear(1024, 10)

    def forward(self, x):
        x = self.conv0(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.relu3(x)
        x = self.pool4(x)
        x = x.view(-1, 128*14*14)   #flatten the 128x14x14 feature maps
        x = self.fc5(x)
        x = self.relu6(x)
        x = self.drop7(x)
        x = self.fc8(x)
        return x
    
net = Net()
print(net)
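
#fc5 expects 128*14*14 inputs: both 3x3 convolutions use padding=1, so the
#28x28 spatial size is preserved, and the single 2x2 max-pool halves it to 14x14.
#Optional shape check with a dummy batch (a sketch, not part of the original code):
with torch.no_grad():
    dummy = torch.zeros(1, 1, 28, 28)   #one fake MNIST image
    feat = net.pool4(net.relu3(net.conv2(net.relu1(net.conv0(dummy)))))
    print(feat.shape)                   #torch.Size([1, 128, 14, 14]) -> 128*14*14 after flattening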

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters())

#train the Net
num_epochs = 5
for epoch in range(num_epochs):
    for idx, (images, labels) in enumerate(train_loader):
        optimizer.zero_grad()
        preds = net(images)
        loss = criterion(preds, labels)
        loss.backward()
        optimizer.step()

        if idx % 5 == 0:
            print('epoch {}, batch {}, loss={:g}'.format(
                epoch, idx, loss.item()
            ))
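
#the loop above runs on the CPU; if a CUDA device is available, the model and
#each batch could be moved to it before the forward pass (a sketch under that assumption):
#    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#    net.to(device)
#    #and inside the training loop:
#    images, labels = images.to(device), labels.to(device)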
            
#save the trained net
torch.save(net, 'net.pkl')

#load the trained net
net1 = torch.load('net.pkl')
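
#torch.save(net, 'net.pkl') pickles the whole module (recent PyTorch versions may
#need torch.load(..., weights_only=False) for that); a commonly preferred alternative
#is to save only the parameters via state_dict (a sketch; the file name net_params.pkl is arbitrary):
torch.save(net.state_dict(), 'net_params.pkl')        #save only the learned parameters
net2 = Net()                                          #rebuild the architecture first
net2.load_state_dict(torch.load('net_params.pkl'))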

#test the trained net
net1.eval()        #disable dropout for evaluation
correct = 0
total = 0
with torch.no_grad():        #no gradients needed during testing
    for images, labels in test_loader:
        preds = net1(images)        #use the reloaded net
        predicted = torch.argmax(preds, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

accuracy = correct/total
print('accuracy of test data:{:.1%}'.format(accuracy))